/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Ravi Patel <rapatel@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR    0
#define RES_RING_CSR    1
#define RES_RING_CMD    2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

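/* Pre-fill each 16-byte buffer pool descriptor with its slot index
 * (read back later as USERINFO to find the matching skb), the
 * destination free-pool queue number and a stash hint; the refill
 * path then only has to patch in the DMA address of each buffer.
 */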
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct xgene_enet_raw_desc16 *raw_desc;
        int i;

        for (i = 0; i < buf_pool->slots; i++) {
                raw_desc = &buf_pool->raw_desc16[i];

                /* Hardware expects descriptor in little endian format */
                raw_desc->m0 = cpu_to_le64(i |
                                SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
                                SET_VAL(STASH, 3));
        }
}

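/* Allocate and DMA-map @nbuf receive buffers, publish them in the
 * buffer pool starting at ->tail, and make them visible to hardware
 * through ring_ops->wr_cmd().  Runs on the RX hot path, so failures
 * are simply returned to the caller.
 */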
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
                                     u32 nbuf)
{
        struct sk_buff *skb;
        struct xgene_enet_raw_desc16 *raw_desc;
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        struct device *dev;
        dma_addr_t dma_addr;
        u32 tail = buf_pool->tail;
        u32 slots = buf_pool->slots - 1;
        u16 bufdatalen, len;
        int i;

        ndev = buf_pool->ndev;
        dev = ndev_to_dev(buf_pool->ndev);
        pdata = netdev_priv(ndev);
        bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
        len = XGENE_ENET_MAX_MTU;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &buf_pool->raw_desc16[tail];

                skb = netdev_alloc_skb_ip_align(ndev, len);
                if (unlikely(!skb))
                        return -ENOMEM;
                buf_pool->rx_skb[tail] = skb;

                dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }

                raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                           SET_VAL(BUFDATALEN, bufdatalen) |
                                           SET_BIT(COHERENT));
                tail = (tail + 1) & slots;
        }

        pdata->ring_ops->wr_cmd(buf_pool, nbuf);
        buf_pool->tail = tail;

        return 0;
}

static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

        return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
        const struct ethhdr *eth = data;

        return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
        struct xgene_enet_raw_desc16 *raw_desc;
        u32 slots = buf_pool->slots - 1;
        u32 tail = buf_pool->tail;
        u32 userinfo;
        int i, len;

        len = pdata->ring_ops->len(buf_pool);
        for (i = 0; i < len; i++) {
                tail = (tail - 1) & slots;
                raw_desc = &buf_pool->raw_desc16[tail];

                /* Hardware stores descriptor in little endian format */
                userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
                dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
        }

        pdata->ring_ops->wr_cmd(buf_pool, -len);
        buf_pool->tail = tail;
}

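/* Hard IRQ handler, shared by the RX and TX-completion rings: mask
 * the interrupt line and defer all descriptor processing to NAPI.
 * The line is unmasked again from the poll routine.
 */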
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
        struct xgene_enet_desc_ring *rx_ring = data;

        if (napi_schedule_prep(&rx_ring->napi)) {
                disable_irq_nosync(irq);
                __napi_schedule(&rx_ring->napi);
        }

        return IRQ_HANDLED;
}

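/* Reclaim one transmitted skb: look it up through the USERINFO index
 * stored at xmit time, unmap its buffer and check the LERR field for
 * a transmit error before freeing it.
 */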
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
                                    struct xgene_enet_raw_desc *raw_desc)
{
        struct sk_buff *skb;
        struct device *dev;
        u16 skb_index;
        u8 status;
        int ret = 0;

        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = cp_ring->cp_skb[skb_index];

        dev = ndev_to_dev(cp_ring->ndev);
        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
                         DMA_TO_DEVICE);

        /* Checking for error */
        status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
                                       status);
                ret = -EIO;
        }

        if (likely(skb)) {
                dev_kfree_skb_any(skb);
        } else {
                netdev_err(cp_ring->ndev, "completion skb is NULL\n");
                ret = -EIO;
        }

        return ret;
}

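/* Build the 64-bit "work message" (hopinfo) for a TX descriptor:
 * Ethernet header length plus IP and TCP header lengths in 32-bit
 * words, and the checksum-offload enable bits.  Anything that is not
 * unfragmented IPv4 TCP/UDP falls through with offload disabled.
 */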
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
        struct iphdr *iph;
        u8 l3hlen, l4hlen = 0;
        u8 csum_enable = 0;
        u8 proto = 0;
        u8 ethhdr;
        u64 hopinfo;

        if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
            unlikely(skb->protocol != htons(ETH_P_8021Q)))
                goto out;

        if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
                goto out;

        iph = ip_hdr(skb);
        if (unlikely(ip_is_fragment(iph)))
                goto out;

        if (likely(iph->protocol == IPPROTO_TCP)) {
                l4hlen = tcp_hdrlen(skb) >> 2;
                csum_enable = 1;
                proto = TSO_IPPROTO_TCP;
        } else if (iph->protocol == IPPROTO_UDP) {
                l4hlen = UDP_HDR_SIZE;
                csum_enable = 1;
        }
out:
        l3hlen = ip_hdrlen(skb) >> 2;
        ethhdr = xgene_enet_hdr_len(skb->data);
        hopinfo = SET_VAL(TCPHDR, l4hlen) |
                  SET_VAL(IPHDR, l3hlen) |
                  SET_VAL(ETHHDR, ethhdr) |
                  SET_VAL(EC, csum_enable) |
                  SET_VAL(IS, proto) |
                  SET_BIT(IC) |
                  SET_BIT(TYPE_ETH_WORK_MESSAGE);

        return hopinfo;
}

static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
                                    struct sk_buff *skb)
{
        struct device *dev = ndev_to_dev(tx_ring->ndev);
        struct xgene_enet_raw_desc *raw_desc;
        dma_addr_t dma_addr;
        u16 tail = tx_ring->tail;
        u64 hopinfo;

        raw_desc = &tx_ring->raw_desc[tail];
        memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

        dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                netdev_err(tx_ring->ndev, "DMA mapping error\n");
                return -EINVAL;
        }

        /* Hardware expects descriptor in little endian format */
        raw_desc->m0 = cpu_to_le64(tail);
        raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                   SET_VAL(BUFDATALEN, skb->len) |
                                   SET_BIT(COHERENT));
        hopinfo = xgene_enet_work_msg(skb);
        raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
                                   hopinfo);
        tx_ring->cp_ring->cp_skb[tail] = skb;

        return 0;
}

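/* TX entry point: apply backpressure when the TX or completion ring
 * is above its high-water mark (half the ring, set up in
 * xgene_enet_create_desc_rings()), otherwise post one descriptor and
 * notify hardware via wr_cmd().
 */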
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
                                         struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
        struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
        u32 tx_level, cq_level;

        tx_level = pdata->ring_ops->len(tx_ring);
        cq_level = pdata->ring_ops->len(cp_ring);
        if (unlikely(tx_level > pdata->tx_qcnt_hi ||
                     cq_level > pdata->cp_qcnt_hi)) {
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        pdata->ring_ops->wr_cmd(tx_ring, 1);
        skb_tx_timestamp(skb);
        tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

        pdata->stats.tx_packets++;
        pdata->stats.tx_bytes += skb->len;

        return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        if (!ip_is_fragment(iph) ||
            (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
}

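/* Receive one frame: unmap its buffer, drop it on a LERR status,
 * trim the 4-byte CRC that hardware leaves in place and pass the skb
 * to GRO.  The buffer pool is topped up once every NUM_BUFPOOL
 * processed descriptors rather than per packet.
 */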
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
                               struct xgene_enet_raw_desc *raw_desc)
{
        struct net_device *ndev;
        struct xgene_enet_pdata *pdata;
        struct device *dev;
        struct xgene_enet_desc_ring *buf_pool;
        u32 datalen, skb_index;
        struct sk_buff *skb;
        u8 status;
        int ret = 0;

        ndev = rx_ring->ndev;
        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(rx_ring->ndev);
        buf_pool = rx_ring->buf_pool;

        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = buf_pool->rx_skb[skb_index];

        /* checking for error */
        status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                dev_kfree_skb_any(skb);
                xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
                                       status);
                pdata->stats.rx_dropped++;
                ret = -EIO;
                goto out;
        }

        /* strip off CRC as HW isn't doing this */
        datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
        datalen -= 4;
        prefetch(skb->data - NET_IP_ALIGN);
        skb_put(skb, datalen);

        skb_checksum_none_assert(skb);
        skb->protocol = eth_type_trans(skb, ndev);
        if (likely((ndev->features & NETIF_F_IP_CSUM) &&
                   skb->protocol == htons(ETH_P_IP))) {
                xgene_enet_skip_csum(skb);
        }

        pdata->stats.rx_packets++;
        pdata->stats.rx_bytes += datalen;
        napi_gro_receive(&rx_ring->napi, skb);
out:
        if (--rx_ring->nbufpool == 0) {
                ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
                rx_ring->nbufpool = NUM_BUFPOOL;
        }

        return ret;
}

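/* RX and TX-completion descriptors may share a ring; only RX
 * descriptors carry a free-pool queue number, so a non-zero FPQNUM
 * tells the two apart.
 */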
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
        return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
                                   int budget)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
        struct xgene_enet_raw_desc *raw_desc;
        u16 head = ring->head;
        u16 slots = ring->slots - 1;
        int ret, count = 0;

        do {
                raw_desc = &ring->raw_desc[head];
                if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
                        break;

                /* read fpqnum field after dataaddr field */
                dma_rmb();
                if (is_rx_desc(raw_desc))
                        ret = xgene_enet_rx_frame(ring, raw_desc);
                else
                        ret = xgene_enet_tx_completion(ring, raw_desc);
                xgene_enet_mark_desc_slot_empty(raw_desc);

                head = (head + 1) & slots;
                count++;

                if (ret)
                        break;
        } while (--budget);

        if (likely(count)) {
                pdata->ring_ops->wr_cmd(ring, -count);
                ring->head = head;

                if (netif_queue_stopped(ring->ndev)) {
                        if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
                                netif_wake_queue(ring->ndev);
                }
        }

        return count;
}

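/* NAPI poll: consuming less than the budget means the ring drained
 * (or an error stopped processing early), so polling ends and the
 * ring's IRQ, masked in xgene_enet_rx_irq(), is re-enabled.
 */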
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
        struct xgene_enet_desc_ring *ring;
        int processed;

        ring = container_of(napi, struct xgene_enet_desc_ring, napi);
        processed = xgene_enet_process_ring(ring, budget);

        if (processed != budget) {
                napi_complete(napi);
                enable_irq(ring->irq);
        }

        return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);

        pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *ring;
        int ret;

        ring = pdata->rx_ring;
        ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                               IRQF_SHARED, ring->irq_name, ring);
        if (ret)
                netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);

        if (pdata->cq_cnt) {
                ring = pdata->tx_ring->cp_ring;
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       IRQF_SHARED, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
                }
        }

        return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);
        devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);

        if (pdata->cq_cnt) {
                devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
                              pdata->tx_ring->cp_ring);
        }
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;

        napi = &pdata->rx_ring->napi;
        napi_enable(napi);

        if (pdata->cq_cnt) {
                napi = &pdata->tx_ring->cp_ring->napi;
                napi_enable(napi);
        }
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;

        napi = &pdata->rx_ring->napi;
        napi_disable(napi);

        if (pdata->cq_cnt) {
                napi = &pdata->tx_ring->cp_ring->napi;
                napi_disable(napi);
        }
}

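/* ndo_open: enable the MAC, hook up IRQs and NAPI, then either start
 * the attached PHY (RGMII) or poll link state from delayed work
 * (SGMII/XGMII).
 */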
static int xgene_enet_open(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_mac_ops *mac_ops = pdata->mac_ops;
        int ret;

        mac_ops->tx_enable(pdata);
        mac_ops->rx_enable(pdata);

        ret = xgene_enet_register_irq(ndev);
        if (ret)
                return ret;
        xgene_enet_napi_enable(pdata);

        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                phy_start(pdata->phy_dev);
        else
                schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

        netif_carrier_off(ndev);
        netif_start_queue(ndev);

        return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_mac_ops *mac_ops = pdata->mac_ops;

        netif_stop_queue(ndev);

        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                phy_stop(pdata->phy_dev);
        else
                cancel_delayed_work_sync(&pdata->link_work);

        xgene_enet_napi_disable(pdata);
        xgene_enet_free_irq(ndev);
        xgene_enet_process_ring(pdata->rx_ring, -1);

        mac_ops->tx_disable(pdata);
        mac_ops->rx_disable(pdata);

        return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        pdata = netdev_priv(ring->ndev);
        dev = ndev_to_dev(ring->ndev);

        pdata->ring_ops->clear(ring);
        dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_desc_ring *buf_pool;

        if (pdata->tx_ring) {
                xgene_enet_delete_ring(pdata->tx_ring);
                pdata->tx_ring = NULL;
        }

        if (pdata->rx_ring) {
                buf_pool = pdata->rx_ring->buf_pool;
                xgene_enet_delete_bufpool(buf_pool);
                xgene_enet_delete_ring(buf_pool);
                xgene_enet_delete_ring(pdata->rx_ring);
                pdata->rx_ring = NULL;
        }
}

static int xgene_enet_get_ring_size(struct device *dev,
                                    enum xgene_enet_ring_cfgsize cfgsize)
{
        int size = -EINVAL;

        switch (cfgsize) {
        case RING_CFGSIZE_512B:
                size = 0x200;
                break;
        case RING_CFGSIZE_2KB:
                size = 0x800;
                break;
        case RING_CFGSIZE_16KB:
                size = 0x4000;
                break;
        case RING_CFGSIZE_64KB:
                size = 0x10000;
                break;
        case RING_CFGSIZE_512KB:
                size = 0x80000;
                break;
        default:
                dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
                break;
        }

        return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        if (!ring)
                return;

        dev = ndev_to_dev(ring->ndev);
        pdata = netdev_priv(ring->ndev);

        if (ring->desc_addr) {
                pdata->ring_ops->clear(ring);
                dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
        }
        devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        struct xgene_enet_desc_ring *ring;

        ring = pdata->tx_ring;
        if (ring) {
                if (ring->cp_ring && ring->cp_ring->cp_skb)
                        devm_kfree(dev, ring->cp_ring->cp_skb);
                if (ring->cp_ring && pdata->cq_cnt)
                        xgene_enet_free_desc_ring(ring->cp_ring);
                xgene_enet_free_desc_ring(ring);
        }

        ring = pdata->rx_ring;
        if (ring) {
                if (ring->buf_pool) {
                        if (ring->buf_pool->rx_skb)
                                devm_kfree(dev, ring->buf_pool->rx_skb);
                        xgene_enet_free_desc_ring(ring->buf_pool);
                }
                xgene_enet_free_desc_ring(ring);
        }
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
                                 struct xgene_enet_desc_ring *ring)
{
        if ((pdata->enet_id == XGENE_ENET2) &&
            (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
                return true;
        }

        return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
                                              struct xgene_enet_desc_ring *ring)
{
        u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

        return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
                        struct net_device *ndev, u32 ring_num,
                        enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
        struct xgene_enet_desc_ring *ring;
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        int size;

        size = xgene_enet_get_ring_size(dev, cfgsize);
        if (size < 0)
                return NULL;

        ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
                            GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ndev = ndev;
        ring->num = ring_num;
        ring->cfgsize = cfgsize;
        ring->id = ring_id;

        ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
                                              GFP_KERNEL);
        if (!ring->desc_addr) {
                devm_kfree(dev, ring);
                return NULL;
        }
        ring->size = size;

        if (is_irq_mbox_required(pdata, ring)) {
                ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
                                &ring->irq_mbox_dma, GFP_KERNEL);
                if (!ring->irq_mbox_addr) {
                        dma_free_coherent(dev, size, ring->desc_addr,
                                          ring->dma);
                        devm_kfree(dev, ring);
                        return NULL;
                }
        }

        ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
        ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
        ring = pdata->ring_ops->setup(ring);
        netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
                   ring->num, ring->size, ring->id, ring->slots);

        return ring;
}

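/* A ring id encodes the owner in the upper bits and a per-owner
 * buffer number in the low six bits: id = (owner << 6) | bufnum.
 */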
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
        return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
        enum xgene_ring_owner owner;

        if (p->enet_id == XGENE_ENET1) {
                switch (p->phy_mode) {
                case PHY_INTERFACE_MODE_SGMII:
                        owner = RING_OWNER_ETH0;
                        break;
                default:
                        owner = (!p->port_id) ? RING_OWNER_ETH0 :
                                                RING_OWNER_ETH1;
                        break;
                }
        } else {
                owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
        }

        return owner;
}

static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
        enum xgene_ring_owner owner;
        u8 cpu_bufnum = pdata->cpu_bufnum;
        u8 eth_bufnum = pdata->eth_bufnum;
        u8 bp_bufnum = pdata->bp_bufnum;
        u16 ring_num = pdata->ring_num;
        u16 ring_id;
        int ret;

        /* allocate rx descriptor ring */
        owner = xgene_derive_ring_owner(pdata);
        ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
        rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                              RING_CFGSIZE_16KB, ring_id);
        if (!rx_ring) {
                ret = -ENOMEM;
                goto err;
        }

        /* allocate buffer pool for receiving packets */
        owner = xgene_derive_ring_owner(pdata);
        ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
        buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
                                               RING_CFGSIZE_2KB, ring_id);
        if (!buf_pool) {
                ret = -ENOMEM;
                goto err;
        }

        rx_ring->nbufpool = NUM_BUFPOOL;
        rx_ring->buf_pool = buf_pool;
        rx_ring->irq = pdata->rx_irq;
        if (!pdata->cq_cnt) {
                snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
                         ndev->name);
        } else {
                snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
        }
        buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
                                        sizeof(struct sk_buff *), GFP_KERNEL);
        if (!buf_pool->rx_skb) {
                ret = -ENOMEM;
                goto err;
        }

        buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
        rx_ring->buf_pool = buf_pool;
        pdata->rx_ring = rx_ring;

        /* allocate tx descriptor ring */
        owner = xgene_derive_ring_owner(pdata);
        ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
        tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                              RING_CFGSIZE_16KB, ring_id);
        if (!tx_ring) {
                ret = -ENOMEM;
                goto err;
        }
        pdata->tx_ring = tx_ring;

        if (!pdata->cq_cnt) {
                cp_ring = pdata->rx_ring;
        } else {
                /* allocate tx completion descriptor ring */
                ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
                cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                      RING_CFGSIZE_16KB,
                                                      ring_id);
                if (!cp_ring) {
                        ret = -ENOMEM;
                        goto err;
                }
                cp_ring->irq = pdata->txc_irq;
                snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
        }

        cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
                                       sizeof(struct sk_buff *), GFP_KERNEL);
        if (!cp_ring->cp_skb) {
                ret = -ENOMEM;
                goto err;
        }
        pdata->tx_ring->cp_ring = cp_ring;
        pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

        pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
        pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
        pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

        return 0;

err:
        xgene_enet_free_desc_rings(pdata);
        return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
                        struct net_device *ndev,
                        struct rtnl_link_stats64 *storage)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct rtnl_link_stats64 *stats = &pdata->stats;

        stats->rx_errors += stats->rx_length_errors +
                            stats->rx_crc_errors +
                            stats->rx_frame_errors +
                            stats->rx_fifo_errors;
        memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

        return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = eth_mac_addr(ndev, addr);
        if (ret)
                return ret;
        pdata->mac_ops->set_mac_addr(pdata);

        return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
        .ndo_open = xgene_enet_open,
        .ndo_stop = xgene_enet_close,
        .ndo_start_xmit = xgene_enet_start_xmit,
        .ndo_tx_timeout = xgene_enet_timeout,
        .ndo_get_stats64 = xgene_enet_get_stats64,
        .ndo_change_mtu = eth_change_mtu,
        .ndo_set_mac_address = xgene_enet_set_mac_address,
};

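/* On ACPI systems the port index comes from the device's slot-number
 * object (_SUN); if _SUN is absent the port defaults to 0.
 */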
#ifdef CONFIG_ACPI
static int xgene_get_port_id_acpi(struct device *dev,
                                  struct xgene_enet_pdata *pdata)
{
        acpi_status status;
        u64 temp;

        status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
        if (ACPI_FAILURE(status)) {
                pdata->port_id = 0;
        } else {
                pdata->port_id = temp;
        }

        return 0;
}
#endif

static int xgene_get_port_id_dt(struct device *dev,
                                struct xgene_enet_pdata *pdata)
{
        u32 id = 0;
        int ret;

        ret = of_property_read_u32(dev->of_node, "port-id", &id);
        if (ret) {
                pdata->port_id = 0;
                ret = 0;
        } else {
                pdata->port_id = id & BIT(0);
        }

        return ret;
}

static int xgene_get_mac_address(struct device *dev,
                                 unsigned char *addr)
{
        int ret;

        ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
        if (ret)
                ret = device_property_read_u8_array(dev, "mac-address",
                                                    addr, 6);
        if (ret)
                return -ENODEV;

        return ETH_ALEN;
}

static int xgene_get_phy_mode(struct device *dev)
{
        int i, ret;
        char *modestr;

        ret = device_property_read_string(dev, "phy-connection-type",
                                          (const char **)&modestr);
        if (ret)
                ret = device_property_read_string(dev, "phy-mode",
                                                  (const char **)&modestr);
        if (ret)
                return -ENODEV;

        for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
                if (!strcasecmp(modestr, phy_modes(i)))
                        return i;
        }

        return -ENODEV;
}

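/* Map the three CSR regions (ENET port, ring, ring command), then
 * read the port id, MAC address, PHY mode and IRQs from DT or ACPI
 * properties and derive the per-port register bases.
 */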
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
        struct platform_device *pdev;
        struct net_device *ndev;
        struct device *dev;
        struct resource *res;
        void __iomem *base_addr;
        u32 offset;
        int ret = 0;

        pdev = pdata->pdev;
        dev = &pdev->dev;
        ndev = pdata->ndev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
        if (!res) {
                dev_err(dev, "Resource enet_csr not defined\n");
                return -ENODEV;
        }
        pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
        if (!pdata->base_addr) {
                dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
                return -ENOMEM;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
        if (!res) {
                dev_err(dev, "Resource ring_csr not defined\n");
                return -ENODEV;
        }
        pdata->ring_csr_addr = devm_ioremap(dev, res->start,
                                            resource_size(res));
        if (!pdata->ring_csr_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
                return -ENOMEM;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
        if (!res) {
                dev_err(dev, "Resource ring_cmd not defined\n");
                return -ENODEV;
        }
        pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
                                            resource_size(res));
        if (!pdata->ring_cmd_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring command region\n");
                return -ENOMEM;
        }

        if (dev->of_node)
                ret = xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
        else
                ret = xgene_get_port_id_acpi(dev, pdata);
#endif
        if (ret)
                return ret;

        if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
                eth_hw_addr_random(ndev);

        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

        pdata->phy_mode = xgene_get_phy_mode(dev);
        if (pdata->phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return pdata->phy_mode;
        }
        if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
            pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
            pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
                dev_err(dev, "Incorrect phy-connection-type specified\n");
                return -ENODEV;
        }

        ret = platform_get_irq(pdev, 0);
        if (ret <= 0) {
                dev_err(dev, "Unable to get ENET Rx IRQ\n");
                ret = ret ? : -ENXIO;
                return ret;
        }
        pdata->rx_irq = ret;

        if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
                ret = platform_get_irq(pdev, 1);
                if (ret <= 0) {
                        pdata->cq_cnt = 0;
                        dev_info(dev, "Unable to get Tx completion IRQ, using Rx IRQ instead\n");
                } else {
                        pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
                        pdata->txc_irq = ret;
                }
        }

        pdata->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pdata->clk)) {
                /* Firmware may have set up the clock already. */
                dev_info(dev, "clocks have been setup already\n");
        }

        if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
                base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
        else
                base_addr = pdata->base_addr;
        pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
        pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
        pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
            pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
                pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
                offset = (pdata->enet_id == XGENE_ENET1) ?
                          BLOCK_ETH_MAC_CSR_OFFSET :
                          X2_BLOCK_ETH_MAC_CSR_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + offset;
        } else {
                pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
        }
        pdata->rx_buff_cnt = NUM_PKT_BUF;

        return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
        struct net_device *ndev = pdata->ndev;
        struct xgene_enet_desc_ring *buf_pool;
        u16 dst_ring_num;
        int ret;

        ret = pdata->port_ops->reset(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_create_desc_rings(ndev);
        if (ret) {
                netdev_err(ndev, "Error in ring configuration\n");
                return ret;
        }

        /* setup buffer pool */
        buf_pool = pdata->rx_ring->buf_pool;
        xgene_enet_init_bufpool(buf_pool);
        ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
        if (ret) {
                xgene_enet_delete_desc_rings(pdata);
                return ret;
        }

        dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
        pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
        pdata->mac_ops->init(pdata);

        return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
        switch (pdata->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
                pdata->mac_ops = &xgene_gmac_ops;
                pdata->port_ops = &xgene_gport_ops;
                pdata->rm = RM3;
                break;
        case PHY_INTERFACE_MODE_SGMII:
                pdata->mac_ops = &xgene_sgmac_ops;
                pdata->port_ops = &xgene_sgport_ops;
                pdata->rm = RM1;
                break;
        default:
                pdata->mac_ops = &xgene_xgmac_ops;
                pdata->port_ops = &xgene_xgport_ops;
                pdata->rm = RM0;
                break;
        }

        if (pdata->enet_id == XGENE_ENET1) {
                switch (pdata->port_id) {
                case 0:
                        pdata->cpu_bufnum = START_CPU_BUFNUM_0;
                        pdata->eth_bufnum = START_ETH_BUFNUM_0;
                        pdata->bp_bufnum = START_BP_BUFNUM_0;
                        pdata->ring_num = START_RING_NUM_0;
                        break;
                case 1:
                        pdata->cpu_bufnum = START_CPU_BUFNUM_1;
                        pdata->eth_bufnum = START_ETH_BUFNUM_1;
                        pdata->bp_bufnum = START_BP_BUFNUM_1;
                        pdata->ring_num = START_RING_NUM_1;
                        break;
                default:
                        break;
                }
                pdata->ring_ops = &xgene_ring1_ops;
        } else {
                switch (pdata->port_id) {
                case 0:
                        pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
                        pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
                        pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
                        pdata->ring_num = X2_START_RING_NUM_0;
                        break;
                case 1:
                        pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
                        pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
                        pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
                        pdata->ring_num = X2_START_RING_NUM_1;
                        break;
                default:
                        break;
                }
                pdata->rm = RM0;
                pdata->ring_ops = &xgene_ring2_ops;
        }
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;

        napi = &pdata->rx_ring->napi;
        netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);

        if (pdata->cq_cnt) {
                napi = &pdata->tx_ring->cp_ring->napi;
                netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
                               NAPI_POLL_WEIGHT);
        }
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;

        napi = &pdata->rx_ring->napi;
        netif_napi_del(napi);

        if (pdata->cq_cnt) {
                napi = &pdata->tx_ring->cp_ring->napi;
                netif_napi_del(napi);
        }
}

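/* Probe: allocate the netdev, pick the ENET generation from the
 * matched DT/ACPI entry, map resources, register the netdev and
 * initialize the hardware.  MDIO is configured for RGMII only; other
 * PHY modes poll link state from a delayed work item.
 */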
static int xgene_enet_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct xgene_enet_pdata *pdata;
        struct device *dev = &pdev->dev;
        struct xgene_mac_ops *mac_ops;
        const struct of_device_id *of_id;
        int ret;

        ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
        if (!ndev)
                return -ENOMEM;

        pdata = netdev_priv(ndev);

        pdata->pdev = pdev;
        pdata->ndev = ndev;
        SET_NETDEV_DEV(ndev, dev);
        platform_set_drvdata(pdev, pdata);
        ndev->netdev_ops = &xgene_ndev_ops;
        xgene_enet_set_ethtool_ops(ndev);
        ndev->features |= NETIF_F_IP_CSUM |
                          NETIF_F_GSO |
                          NETIF_F_GRO;

        of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
        if (of_id) {
                pdata->enet_id = (enum xgene_enet_id)of_id->data;
        }
#ifdef CONFIG_ACPI
        else {
                const struct acpi_device_id *acpi_id;

                acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
                if (acpi_id)
                        pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
        }
#endif
        if (!pdata->enet_id) {
                free_netdev(ndev);
                return -ENODEV;
        }

        ret = xgene_enet_get_resources(pdata);
        if (ret)
                goto err;

        xgene_enet_setup_ops(pdata);

        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
                goto err;
        }

        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err_netdev;
        }

        ret = xgene_enet_init_hw(pdata);
        if (ret)
                goto err_netdev;

        xgene_enet_napi_add(pdata);
        mac_ops = pdata->mac_ops;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                ret = xgene_enet_mdio_config(pdata);
        else
                INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

        return ret;

err_netdev:
        /* Only unregister once register_netdev() has succeeded */
        unregister_netdev(ndev);
err:
        free_netdev(ndev);
        return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
        struct xgene_enet_pdata *pdata;
        struct xgene_mac_ops *mac_ops;
        struct net_device *ndev;

        pdata = platform_get_drvdata(pdev);
        mac_ops = pdata->mac_ops;
        ndev = pdata->ndev;

        mac_ops->rx_disable(pdata);
        mac_ops->tx_disable(pdata);

        xgene_enet_napi_del(pdata);
        xgene_enet_mdio_remove(pdata);
        xgene_enet_delete_desc_rings(pdata);
        unregister_netdev(ndev);
        pdata->port_ops->shutdown(pdata);
        free_netdev(ndev);

        return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
        { "APMC0D05", XGENE_ENET1},
        { "APMC0D30", XGENE_ENET1},
        { "APMC0D31", XGENE_ENET1},
        { "APMC0D26", XGENE_ENET2},
        { "APMC0D25", XGENE_ENET2},
        { }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
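/* DT matching is keyed off these compatible strings.  For reference, a
 * minimal, purely illustrative node might look like the sketch below.
 * The addresses, sizes and interrupt specifier are placeholders, not
 * values taken from any real board file; the authoritative binding
 * lives under Documentation/devicetree/bindings.
 *
 *	ethernet@17020000 {
 *		compatible = "apm,xgene-enet";
 *		reg = <0x0 0x17020000 0x0 0x30>,	// RES_ENET_CSR
 *		      <0x0 0x17030000 0x0 0x10000>,	// RES_RING_CSR
 *		      <0x0 0x10000000 0x0 0x400>;	// RES_RING_CMD
 *		interrupts = <0x0 0x3c 0x4>;	// RX; a second entry, if
 *						// present, is the TXC IRQ
 *		port-id = <0>;
 *		local-mac-address = [00 11 22 33 44 55];
 *		phy-connection-type = "rgmii";
 *	};
 */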
static const struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
        {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
        {},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
        .driver = {
                   .name = "xgene-enet",
                   .of_match_table = of_match_ptr(xgene_enet_of_match),
                   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
        },
        .probe = xgene_enet_probe,
        .remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");