/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_ERR_PRINT_CYCLE 1000

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
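
/* fill one TX buffer descriptor: record the buffer's DMA address and length,
 * request checksum offload according to the skb metadata, and advance the
 * ring's next_to_use pointer
 */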
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset = 0;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* IPv6 has no L3 checksum, check the L4 header only */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}
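
/* map the skb head and every page fragment for DMA, fill one descriptor per
 * buffer, then kick the hardware queue; on a mapping failure all descriptors
 * filled so far are unwound and unmapped
 */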
int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct hnae_ring *ring = ring_data->ring;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i, j;
	struct sk_buff *new_skb;

	assert(ring->max_desc_num_per_pkt <= ring->desc_num);

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1) {
			ring->stats.tx_busy++;
			goto out_net_tx_busy;
		}

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb) {
			ring->stats.sw_err_cnt++;
			netdev_err(ndev, "no memory to xmit!\n");
			goto out_err_tx_ok;
		}

		dev_kfree_skb_any(skb);
		skb = new_skb;
		buf_num = 1;
		assert(skb_shinfo(skb)->nr_frags == 1);
	} else if (buf_num > ring_space(ring)) {
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	}
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num,
		  DESC_TYPE_SKB);

	/* fill the fragments */
	for (i = 1; i < buf_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		fill_desc(ring, skb_frag_page(frag), size, dma,
			  buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE);
	}

	/* complete translate all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	for (j = i - 1; j > 0; j--) {
		unfill_desc(ring);
		next_to_use = ring->next_to_use;
		dma_unmap_page(dev, ring->desc_cb[next_to_use].dma,
			       ring->desc_cb[next_to_use].length,
			       DMA_TO_DEVICE);
	}

	unfill_desc(ring);
	next_to_use = ring->next_to_use;
	dma_unmap_single(dev, ring->desc_cb[next_to_use].dma,
			 ring->desc_cb[next_to_use].length, DMA_TO_DEVICE);

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: RX descriptor flags, used to identify VLAN/L3/L4 protocols
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

static void
hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset)
{
	/* avoid re-using remote pages, flag default unreuse */
	if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) {
		/* move offset up to the next cache line */
		desc_cb->page_offset += tsize;

		if (desc_cb->page_offset <= last_offset) {
			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
	}
}
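
/* receive one packet: short frames are copied whole into a freshly allocated
 * skb, longer frames copy only the headers and attach the ring pages as
 * frags; descriptor error flags are checked after all buffer descriptors of
 * the packet have been consumed, so the ring stays in sync even for bad
 * packets
 */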
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, size, i, truesize, last_offset;
	int pull_len;
	u32 bnum_flag;

	last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
	*out_bnum = bnum;
	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		size = le16_to_cpu(desc->rx.size);
		truesize = ALIGN(size, L1_CACHE_BYTES);
		skb_add_rx_frag(skb, 0, desc_cb->priv,
				desc_cb->page_offset + pull_len,
				size - pull_len, truesize - pull_len);

		hns_nic_reuse_page(desc_cb, truesize, last_offset);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];
			size = le16_to_cpu(desc->rx.size);
			truesize = ALIGN(size, L1_CACHE_BYTES);
			skb_add_rx_frag(skb, i, desc_cb->priv,
					desc_cb->page_offset,
					size, truesize);

			hns_nic_reuse_page(desc_cb, truesize, last_offset);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check except process, free skb and jump the desc */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		if (!(ring->stats.err_pkt_len % RCB_ERR_PRINT_CYCLE))
			netdev_dbg(ndev,
				   "pkt_len(%u),drop(%u),%#llx,%#llx\n",
				   le16_to_cpu(desc->rx.pkt_len),
				   hnae_get_bit(bnum_flag, HNS_RXD_DROP_B),
				   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		if (!(ring->stats.l2_err % RCB_ERR_PRINT_CYCLE))
			netdev_dbg(ndev, "L2 check err,%#llx,%#llx\n",
				   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
		if (!(ring->stats.l3l4_csum_err % RCB_ERR_PRINT_CYCLE))
			netdev_dbg(ndev,
				   "check err(%#x),%#llx,%#llx\n",
				   hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) |
				   hnae_get_bit(bnum_flag, HNS_RXD_L4E_B),
				   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.l3l4_csum_err++;
		return 0;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all buffers are written back before the doorbell */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* pass the received skb up to the protocol stack via GRO */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
	ndev->last_rx = jiffies;
}
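
/* NAPI RX poll: consume up to @budget packets, refilling RX buffers in
 * batches of RCB_NOF_ALLOC_RX_BUFF_ONCE; before returning, re-read the
 * hardware FBD counter and keep polling if more descriptors arrived
 */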
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum, ex_num;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data, clean_count);
			clean_count = 0;
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* do jump the err */
			recv_pkts++;
			continue;
		}

		/* do update ip stack process */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all buffers are written back before submit */
	if (clean_count > 0) {
		hns_nic_alloc_rx_buffers(ring_data, clean_count);
		clean_count = 0;
	}

	if (recv_pkts < budget) {
		ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
		rmb(); /* complete reading the rx ring bd number */
		if (ex_num > 0) {
			num += ex_num;
			goto recv;
		}
	}

	return recv_pkts;
}

static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	/* re-check for descriptors that raced with the poll (hardware bug workaround) */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}
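
/* a clean head reported by hardware is only plausible if it lies in the
 * half-open interval (next_to_clean, next_to_use], taking ring wrap-around
 * into account
 */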
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before call this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif

/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = ring->next_to_clean;

	/* re-read the hardware head to catch late completions (hardware bug workaround) */
	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: ring position set by software */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}
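
/* shared NAPI handler for both directions: poll_one/ex_process/fini_process
 * are set per ring at init time, so TX and RX rings use one poll routine
 */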
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	int clean_complete = ring_data->poll_one(
		ring_data, budget, ring_data->ex_process);

	if (clean_complete >= 0 && clean_complete < budget) {
		napi_complete(napi);
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 0);

		ring_data->fini_process(ring_data);
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the link mode according to the PHY state
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 *
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;

	if (!h->phy_node)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		phy_dev = of_phy_connect(ndev, h->phy_node,
					 hns_nic_adjust_link, 0, h->phy_if);
	else
		phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);

	if (unlikely(!phy_dev) || IS_ERR(phy_dev))
		return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	priv->phy = phy_dev;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set the mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct device_node *node = priv->dev->of_node;
	const void *mac_addr_temp;

	mac_addr_temp = of_get_mac_address(node);
	if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
		memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
	} else {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}
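
/* request one IRQ per ring (TX rings first, then RX rings) and pin each
 * ring's IRQ to the CPU matching its queue index when that CPU is online
 */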
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;
	cpumask_t mask;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);
		rd->ring->irq_init_flag = RCB_IRQ_INITED;

		/* set cpu affinity */
		if (cpu_online(rd->queue_index)) {
			cpumask_clear(&mask);
			cpu = rd->queue_index;
			cpumask_set_cpu(cpu, &mask);
			irq_set_affinity_hint(rd->ring->irq, &mask);
		}
	}

	return 0;
}
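
/* bring-up order: request IRQs, open all rings, enable the queues, program
 * the MAC address, then start the AE and the PHY; on failure, unwind in
 * reverse order
 */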
static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j, k;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	for (k = 0; k < h->q_num; k++)
		h->dev->ops->toggle_queue_status(h->qs[k], 1);

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (priv->phy)
		phy_start(priv->phy);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
	for (k = 0; k < h->q_num; k++)
		h->dev->ops->toggle_queue_status(h->qs[k], 0);
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (priv->phy)
		phy_stop(priv->phy);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	priv->netdev->trans_start = jiffies;
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct phy_device *phy_dev = priv->phy;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < priv->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		ndev->trans_start = jiffies;
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (netif_running(ndev)) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);

		ret = h->dev->ops->set_mtu(h, new_mtu);
		if (ret)
			netdev_err(ndev, "set mtu fail, return value %d\n",
				   ret);

		if (hns_nic_net_open(ndev))
			netdev_err(ndev, "hns net open fail\n");
	} else {
		ret = h->dev->ops->set_mtu(h, new_mtu);
	}

	if (!ret)
		ndev->mtu = new_mtu;

	return ret;
}

/**
 * hns_set_multicast_list - set multicast mac addresses
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
					      struct rtnl_link_stats64 *stats)
{
	int idx = 0;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;

	return stats;
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_set_multicast_list,
};
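
/* a port is only reported as up when both the PHY (if any) and the MAC-level
 * status from the AE agree that the link is up
 */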
static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (priv->phy) {
		if (!genphy_update_link(priv->phy))
			state = priv->phy->link;
		else
			state = 0;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
			netdev_info(netdev, "link up\n");
		} else {
			netif_carrier_off(netdev);
			netdev_info(netdev, "link down\n");
		}
		priv->link = state;
	}
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_err(priv->netdev, "Reset %s port\n",
		   (type == HNAE_PORT_DEBUG ? "debug" : "business"));

	rtnl_lock();
	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		hns_nic_net_down(priv->netdev);
		hns_nic_net_reset(priv->netdev);
	}
	rtnl_unlock();
}

/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	assert(test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_reset_subtask(priv);
	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}
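
/* priv->ring_data is laid out as one flat array: entries [0, q_num) carry
 * the TX rings and entries [q_num, 2 * q_num) the RX rings, so ring i and
 * ring i + q_num belong to the same hardware queue
 */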
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = hns_nic_tx_fini_pro;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = hns_nic_rx_fini_pro;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
					      NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->ae_name, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = PTR_ERR(h);
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}

static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	struct device_node *node = dev->of_node;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (of_device_is_compatible(node, "hisilicon,hns-nic-v2"))
		priv->enet_ver = AE_VERSION_2;
	else
		priv->enet_ver = AE_VERSION_1;

	ret = of_property_read_string(node, "ae-name", &priv->ae_name);
	if (ret)
		goto out_read_string_fail;

	ret = of_property_read_u32(node, "port-id", &priv->port_id);
	if (ret)
		goto out_read_string_fail;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 32bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_string_fail:
	free_netdev(ndev);
	return ret;
}

static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (priv->phy)
		phy_disconnect(priv->phy);
	priv->phy = NULL;

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}
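
/* A minimal sketch of a devicetree node this driver binds against (the
 * authoritative binding lives in
 * Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt; the ae-name
 * and port-id values below are only illustrative):
 *
 *	ethernet@0 {
 *		compatible = "hisilicon,hns-nic-v1";
 *		ae-name = "dsaf0";
 *		port-id = <0>;
 *	};
 */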
static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.owner = THIS_MODULE,
		.of_match_table = hns_enet_of_match,
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");