/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Ravi Patel <rapatel@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

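/* Seed each buffer pool descriptor with its slot index (read back later
 * as USERINFO to locate the matching sk_buff), the free pool queue number
 * and a stash hint.
 */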
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

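/* Allocate nbuf fresh receive buffers, DMA-map them and hand them to the
 * hardware by filling descriptors from the buffer pool tail onwards.
 */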
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

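/* Reclaim one transmitted skb: unmap its head and all fragment buffers,
 * check the LERR status reported by hardware and free the skb.
 */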
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb), DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

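/* Build the 64-bit "work message" (hopinfo) for a Tx descriptor: Ethernet,
 * IP and TCP/UDP header lengths, the checksum-offload enables and, when
 * TSO applies, the segmentation enable bit.  The driver caches a single
 * MSS value and reprograms the MAC whenever it changes.
 */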
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to fit within the first 3 buffers */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			if (mss != pdata->mss) {
				pdata->mss = mss;
				pdata->mac_ops->set_mss(pdata);
			}
			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		   SET_VAL(IPHDR, l3hlen) |
		   SET_VAL(ETHHDR, ethhdr) |
		   SET_VAL(EC, csum_enable) |
		   SET_VAL(IS, proto) |
		   SET_BIT(IC) |
		   SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

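/* Fill the Tx descriptor(s) for an skb.  A linear skb takes one ring slot;
 * a fragmented skb also consumes an extended descriptor, and fragments
 * that do not fit there (or pieces of fragments larger than 16KB, which
 * are split) spill into an out-of-line (LL) buffer list.  Returns the
 * number of ring slots consumed, or a negative errno.
 */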
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	hopinfo = xgene_enet_work_msg(skb);
	if (!hopinfo)
		return -EINVAL;
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	tx_ring->tail = tail;

	return count;
}

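/* Transmit entry point.  Backpressure is applied by stopping the queue
 * whenever the Tx ring or its completion ring fills past the high
 * watermark; the queue is woken again from xgene_enet_process_ring().
 */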
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;
	int count;

	tx_level = pdata->ring_ops->len(tx_ring);
	cq_level = pdata->ring_ops->len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pdata->ring_ops->wr_cmd(tx_ring, count);
	skb_tx_timestamp(skb);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

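/* Receive one frame: unmap the buffer, check the LERR status, trim the
 * trailing CRC that the hardware leaves in place and hand the skb to GRO.
 * The buffer pool is topped up once every NUM_BUFPOOL frames.
 */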
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

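/* NAPI poll worker shared by the Rx and Tx-completion rings: walk the ring
 * until the budget is spent or an empty slot is seen, dispatching each
 * descriptor to xgene_enet_rx_frame() or xgene_enet_tx_completion() based
 * on its FPQNUM field, then return all consumed slots in one command write.
 */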
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0, processed = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
		}
		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		processed++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret;

	ring = pdata->rx_ring;
	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ring->irq_name, ring);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);

	if (pdata->cq_cnt) {
		ring = pdata->tx_ring->cp_ring;
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);

	if (pdata->cq_cnt) {
		devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
			      pdata->tx_ring->cp_ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_enable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_disable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	xgene_enet_napi_enable(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_carrier_off(ndev);
	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_napi_disable(pdata);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

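/* Allocate and set up one descriptor ring: coherent DMA memory for the
 * descriptors, an interrupt mailbox where required (ENET2 rings owned by
 * the CPU) and the ring's command register base.
 */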
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
				&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

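/* Create the ring topology for one port: an Rx ring with its buffer pool,
 * a Tx ring with its out-of-line buffer area, and either a dedicated Tx
 * completion ring (when a separate completion IRQ is available) or the Rx
 * ring doubling as the completion ring.
 */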
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int ret, size;

	/* allocate rx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	if (!pdata->cq_cnt) {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			 ndev->name);
	} else {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
	}
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
	tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
						GFP_KERNEL);
	if (!tx_ring->exp_bufs) {
		ret = -ENOMEM;
		goto err;
	}

	pdata->tx_ring = tx_ring;

	if (!pdata->cq_cnt) {
		cp_ring = pdata->rx_ring;
	} else {
		/* allocate tx completion descriptor ring */
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!cp_ring) {
			ret = -ENOMEM;
			goto err;
		}
		cp_ring->irq = pdata->txc_irq;
		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
	}

	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}

	size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
	cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
					      size, GFP_KERNEL);
	if (!cp_ring->frag_dma_addr) {
		devm_kfree(dev, cp_ring->cp_skb);
		ret = -ENOMEM;
		goto err;
	}

	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static int xgene_get_port_id_acpi(struct device *dev,
				  struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}

	return 0;
}
#endif

static int xgene_get_port_id_dt(struct device *dev,
				struct xgene_enet_pdata *pdata)
{
	u32 id = 0;
	int ret;

	ret = of_property_read_u32(dev->of_node, "port-id", &id);
	if (ret) {
		pdata->port_id = 0;
		ret = 0;
	} else {
		pdata->port_id = id & BIT(0);
	}

	return ret;
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

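/* Gather platform resources: the CSR, ring CSR and ring command MMIO
 * regions, port id (DT "port-id" or ACPI _SUN), MAC address, PHY mode,
 * RGMII delays, IRQs and clock, then derive the per-block register bases
 * from the port id.
 */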
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		ret = xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		ret = xgene_get_port_id_acpi(dev, pdata);
#endif
	if (ret)
		return ret;

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
		ret = platform_get_irq(pdev, 1);
		if (ret <= 0) {
			pdata->cq_cnt = 0;
			dev_info(dev, "Unable to get Tx completion IRQ, using Rx IRQ instead\n");
		} else {
			pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
			pdata->txc_irq = ret;
		}
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}

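/* Bind the MAC, port and ring operations matching the PHY interface mode,
 * and pick the buffer and ring number ranges for this port and ENET
 * generation.
 */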
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
			pdata->eth_bufnum = START_ETH_BUFNUM_0;
			pdata->bp_bufnum = START_BP_BUFNUM_0;
			pdata->ring_num = START_RING_NUM_0;
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_del(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_del(napi);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		pdata->mss = XGENE_ENET_MSS;
	}
	ndev->hw_features = ndev->features;

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err_netdev;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err_netdev;

	xgene_enet_napi_add(pdata);
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	return ret;

err_netdev:
	/* only reached after register_netdev() has succeeded */
	unregister_netdev(ndev);
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");