/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"

#define DRV_MODULE_NAME		"sunvnet"
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define VNET_MAX_TXQS		16

/* Heuristic for the number of times to exponentially backoff and
 * retry sending an LDC trigger when EAGAIN is encountered.
 */
#define	VNET_MAX_RETRIES	10
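/* Note (illustrative): the retry loops in vnet_send_ack() and
 * __vnet_tx_trigger() pair this limit with a doubling udelay() that is
 * capped at 128 us, so the backoff schedule works out to roughly
 * 1, 2, 4, ..., 128 us, then repeats at 128 us until VNET_MAX_RETRIES
 * attempts have been made.
 */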
static int __vnet_tx_trigger(struct vnet_port *port, u32 start);

/* Ordered from largest major to lowest. */
static struct vio_version vnet_versions[] = {
	{ .major = 1, .minor = 6 },
	{ .major = 1, .minor = 0 },
};

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}
static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	pr_err("Resetting connection\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}
static int vnet_send_attr(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int framelen = ETH_FRAME_LEN;
	int i;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	if (vio_version_before(vio, 1, 2))
		pkt.xfer_mode = VIO_DRING_MODE;
	else
		pkt.xfer_mode = VIO_NEW_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;

	/* Pack the 6-byte MAC address big-endian into the low 48 bits. */
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	if (vio_version_after(vio, 1, 3)) {
		if (port->rmtu) {
			port->rmtu = min(VNET_MAXPACKET, port->rmtu);
			pkt.mtu = port->rmtu;
		} else {
			port->rmtu = VNET_MAXPACKET;
			pkt.mtu = port->rmtu;
		}
		if (vio_version_after_eq(vio, 1, 6))
			pkt.options = VIO_TX_DRING;
	} else if (vio_version_before(vio, 1, 3)) {
		pkt.mtu = framelen;
	} else { /* v1.3 */
		pkt.mtu = framelen + VLAN_HLEN;
	}

	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       "cflags[0x%04x] lso_max[%u]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long)pkt.addr,
	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	struct vnet_port *port = to_vnet_port(vio);
	u64 localmtu;
	u8 xfer_mode;

	viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.sid = vio_send_sid(vio);

	xfer_mode = pkt->xfer_mode;
	/* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
	if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
		xfer_mode = VIO_NEW_DRING_MODE;

	/* MTU negotiation:
	 *	< v1.3 - ETH_FRAME_LEN exactly
	 *	> v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
	 *		 pkt->mtu for ACK
	 *	= v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
	 */
	if (vio_version_before(vio, 1, 3)) {
		localmtu = ETH_FRAME_LEN;
	} else if (vio_version_after(vio, 1, 3)) {
		localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
		localmtu = min(pkt->mtu, localmtu);
		pkt->mtu = localmtu;
	} else { /* v1.3 */
		localmtu = ETH_FRAME_LEN + VLAN_HLEN;
	}
	port->rmtu = localmtu;

	/* for version >= 1.6, ACK packet mode we support */
	if (vio_version_after_eq(vio, 1, 6)) {
		pkt->xfer_mode = VIO_NEW_DRING_MODE;
		pkt->options = VIO_TX_DRING;
	}

	/* The peer must support the (new) dring transfer mode; note this
	 * must be a bitwise AND (a bitwise OR here is always non-zero and
	 * would make the test a no-op).
	 */
	if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != localmtu) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	} else {
		viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
		       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
		       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
		       pkt->xfer_mode, pkt->addr_type,
		       (unsigned long long)pkt->addr,
		       pkt->ack_freq, pkt->plnk_updt, pkt->options,
		       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
		       pkt->ipv4_lso_maxlen);

		pkt->tag.stype = VIO_SUBTYPE_ACK;

		return vio_ldc_send(vio, pkt, sizeof(*pkt));
	}
}
static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
static void vnet_handshake_complete(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}
/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
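/* Worked example (illustrative): for a 60-byte frame the receive path
 * copies (60 + VNET_PACKET_SKIP + 7) & ~7 = 72 bytes, i.e. the six
 * implied skip bytes plus the frame, rounded up to an 8-byte multiple.
 */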
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}
static int vnet_rx_one(struct vnet_port *port, unsigned int len,
		       struct ldc_trans_cookie *cookies, int ncookies)
{
	struct net_device *dev = port->vp->dev;
	unsigned int copy_len;
	struct sk_buff *skb;
	int err;

	err = -EMSGSIZE;
	if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       cookies, ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	napi_gro_receive(&port->napi, skb);
	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}
static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_ACK,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident	= dr->ident,
		.start_idx	= start,
		.end_idx	= end,
		.state		= vio_dring_state,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES) {
			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}
static u32 next_idx(u32 idx, struct vio_dring_state *dr)
{
	if (++idx == dr->num_entries)
		idx = 0;
	return idx;
}

static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
{
	if (idx == 0)
		idx = dr->num_entries - 1;
	else
		idx--;

	return idx;
}
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}
static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	BUG_ON(desc == NULL);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	dma_rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
	if (err == -ECONNRESET)
		return err;
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end, int *npkts, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;
	bool send_ack = true;

	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		(*npkts)++;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = next_idx(start, dr);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
		if ((*npkts) >= budget) {
			send_ack = false;
			break;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = prev_idx(start, dr);
	if (send_ack) {
		port->napi_resume = false;
		return vnet_send_ack(port, dr, ack_start, ack_end,
				     VIO_DRING_STOPPED);
	} else {
		port->napi_resume = true;
		port->napi_stop_idx = ack_end;
		return 1;
	}
}
static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
		   int budget)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	if (!port->napi_resume)
		dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
			    npkts, budget);
}
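/* Illustrative example for idx_is_pending() below: with dr->cons = 2 and
 * dr->prod = 5, descriptors 2, 3 and 4 are in the pending window, so
 * idx_is_pending(dr, 4) returns 1 while idx_is_pending(dr, 5) returns 0.
 */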
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = next_idx(idx, dr);
	}
	return found;
}
static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	vp = port->vp;
	dev = vp->dev;
	netif_tx_lock(dev);
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	dr->cons = next_idx(end, dr);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}
/* Got back a STOPPED LDC message on port.  If the queue is stopped,
 * wake it up so that we'll send out another START message at the
 * next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);
	__netif_tx_unlock(txq);
}
static inline bool port_is_up(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}
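/* Overview of vnet_event_napi() below: link-state events (RESET/UP) are
 * handled first, then LDC messages are drained until the NAPI budget is
 * consumed.  When a dring window is only partially processed,
 * napi_resume/napi_stop_idx record where the next vnet_poll() pass
 * should pick up.
 */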
static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;
	int event = (port->rx_event & LDC_EVENT_RESET);

ldc_ctrl:
	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);

		if (event == LDC_EVENT_RESET) {
			port->rmtu = 0;
			vio_port_up(vio);
		}
		port->rx_event = 0;
		return 0;
	}
	/* We may have multiple LDC events in rx_event. Unroll send_events() */
	event = (port->rx_event & LDC_EVENT_UP);
	port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
	if (event == LDC_EVENT_UP)
		goto ldc_ctrl;
	event = port->rx_event;
	if (!(event & LDC_EVENT_DATA_READY))
		return 0;

	/* we don't expect any other bits than RESET, UP, DATA_READY */
	BUG_ON(event != LDC_EVENT_DATA_READY);

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		if (port->napi_resume) {
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = next_idx(port->napi_stop_idx, dr);
			pkt->end_idx = -1;
			goto napi_resume;
		}
		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;
napi_resume:
		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!port_is_up(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}
static int vnet_poll(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete(napi);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident	= dr->ident,
		.start_idx	= start,
		.end_idx	= (u32) -1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	return err;
}
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct vnet_port *port;

	hlist_for_each_entry_rcu(port, hp, hash) {
		if (!port_is_up(port))
			continue;
		if (ether_addr_equal(port->raddr, skb->data))
			return port;
	}
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (!port->switch_port)
			continue;
		if (!port_is_up(port))
			continue;
		return port;
	}
	return NULL;
}
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  int *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod-1;
	if (txi < 0)
		txi = VNET_TX_RING_SIZE-1;

	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_DONE) {
			if (port->tx_bufs[txi].skb) {
				BUG_ON(port->tx_bufs[txi].skb->next);

				port->tx_bufs[txi].skb->next = skb;
				skb = port->tx_bufs[txi].skb;
				port->tx_bufs[txi].skb = NULL;

				ldc_unmap(port->vio.lp,
					  port->tx_bufs[txi].cookies,
					  port->tx_bufs[txi].ncookies);
			}
			d->hdr.state = VIO_DESC_FREE;
		} else if (d->hdr.state == VIO_DESC_READY) {
			(*pending)++;
		} else if (d->hdr.state == VIO_DESC_FREE) {
			break;
		}
		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE-1;
	}
	return skb;
}
static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}
static void vnet_clean_timer_expire(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	int pending;

	netif_tx_lock(port->vp->dev);
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(port->vp->dev);

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
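/* Worked example (illustrative) for vnet_skb_shape() below: a 54-byte
 * frame is padded to ETH_ZLEN (60 bytes, pad = 6), VNET_PACKET_SKIP (6)
 * brings len to 66, and rounding to the next 8-byte multiple adds
 * another 6 (pad = 12, len = 72).  The mapped buffer then starts at
 * skb->data - VNET_PACKET_SKIP and covers 72 bytes.
 */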
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
					     int *plen)
{
	struct sk_buff *nskb;
	int len, pad;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len = ETH_ZLEN;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);
	len += 8 - (len & 7);

	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP) {
		nskb = alloc_and_align_skb(skb->dev, skb->len);
		if (!nskb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);
		if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		dev_kfree_skb(skb);
		skb = nskb;
	}

	*pstart = skb->data - VNET_PACKET_SKIP;
	*plen = len;
	return skb;
}
static u16
vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
		  void *accel_priv, select_queue_fallback_t fallback)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = __tx_port_find(vp, skb);

	if (!port)
		return 0;
	return port->q_index;
}
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	void *start = NULL;
	int nlen = 0;
	unsigned pending = 0;
	struct netdev_queue *txq;

	skb = vnet_skb_shape(skb, &start, &nlen);
	if (unlikely(!skb))
		goto out_dropped;

	rcu_read_lock();
	port = __tx_port_find(vp, skb);
	if (unlikely(!port)) {
		/* port is NULL here, so out_dropped will not unlock again */
		rcu_read_unlock();
		goto out_dropped;
	}

	if (skb->len > port->rmtu) {
		unsigned long localmtu = port->rmtu - ETH_HLEN;

		if (vio_version_after_eq(&port->vio, 1, 3))
			localmtu -= VLAN_HLEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct flowi4 fl4;
			struct rtable *rt = NULL;

			memset(&fl4, 0, sizeof(fl4));
			fl4.flowi4_oif = dev->ifindex;
			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
			fl4.daddr = ip_hdr(skb)->daddr;
			fl4.saddr = ip_hdr(skb)->saddr;

			rt = ip_route_output_key(dev_net(dev), &fl4);
			if (!IS_ERR(rt)) {
				skb_dst_set(skb, &rt->dst);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED,
					  htonl(localmtu));
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
		goto out_dropped;
	}

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		rcu_read_unlock();
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	txi = dr->prod;

	freeskbs = vnet_clean_tx_ring(port, &pending);

	BUG_ON(port->tx_bufs[txi].skb);

	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	port->tx_bufs[txi].skb = skb;
	skb = NULL;

	err = ldc_map_single(port->vio.lp, start, nlen,
			     port->tx_bufs[txi].cookies, VNET_MAXCOOKIES,
			     (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
	if (err < 0) {
		netdev_info(dev, "tx buffer map error %d\n", err);
		goto out_dropped;
	}
	port->tx_bufs[txi].ncookies = err;

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer.
	 */
	if (!port->start_cons)
		goto ldc_start_done; /* previous trigger suffices */

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;

out_dropped:
	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else if (port)
		del_timer(&port->clean_timer);
	if (port)
		rcu_read_unlock();
	if (skb)
		dev_kfree_skb(skb);
	vnet_free_skbs(freeskbs);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}

static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	return 0;
}

static int vnet_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	return 0;
}
static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (ether_addr_equal(m->addr, addr))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, ha->addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		m = kzalloc(sizeof(*m), GFP_ATOMIC);
		if (!m)
			continue;
		memcpy(m->addr, ha->addr, ETH_ALEN);
		m->hit = 1;

		m->next = vp->mcast_list;
		vp->mcast_list = m;
	}
}
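/* __send_mc_list() below makes two passes over vp->mcast_list: it first
 * reports not-yet-sent addresses to the peer with info.set = 1, then
 * drops and reports entries that were not re-hit by __update_mc_list()
 * with info.set = 0, batching up to VNET_NUM_MCAST addresses per LDC
 * message.
 */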
static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
		n_addrs = 0;
	}

	info.set = 0;

	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}
static void vnet_set_rx_mode(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &vp->port_list, list) {

		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
			break;
		}
	}
	rcu_read_unlock();
}
static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 65535)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static int vnet_set_mac_addr(struct net_device *dev, void *p)
{
	return -EINVAL;
}

static void vnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static u32 vnet_get_msglevel(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);

	return vp->msg_enable;
}

static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
	struct vnet *vp = netdev_priv(dev);

	vp->msg_enable = value;
}
static const struct ethtool_ops vnet_ethtool_ops = {
	.get_drvinfo		= vnet_get_drvinfo,
	.get_msglevel		= vnet_get_msglevel,
	.set_msglevel		= vnet_set_msglevel,
	.get_link		= ethtool_op_get_link,
};
static void vnet_port_free_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (dr->base == NULL)
		return;

	/* Unmap and free any queued skbs before tearing down the dring,
	 * since vio_dring_entry() below dereferences dr->base.
	 */
	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		struct vio_net_desc *d;
		void *skb = port->tx_bufs[i].skb;

		if (!skb)
			continue;

		d = vio_dring_entry(dr, i);
		if (d->hdr.state == VIO_DESC_READY)
			pr_warn("active transmit buffers freed\n");

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);
		dev_kfree_skb(skb);
		port->tx_bufs[i].skb = NULL;
		d->hdr.state = VIO_DESC_FREE;
	}

	ldc_free_exp_dring(port->vio.lp, dr->base,
			   (dr->entry_size * dr->num_entries),
			   dr->cookies, dr->ncookies);
	dr->base = NULL;
	dr->entry_size = 0;
	dr->num_entries = 0;
	dr->pending = 0;
	dr->ncookies = 0;
}
static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len;
	int i, err, ncookies;
	void *dring;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	len = (VNET_TX_RING_SIZE *
	       (sizeof(struct vio_net_desc) +
		(sizeof(struct ldc_trans_cookie) * 2)));

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = (sizeof(struct vio_net_desc) +
			  (sizeof(struct ldc_trans_cookie) * 2));
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	port->start_cons = true; /* need an initial trigger */
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		d = vio_dring_entry(dr, i);
		d->hdr.state = VIO_DESC_FREE;
	}
	return 0;

err_out:
	vnet_port_free_tx_bufs(port);

	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vnet_poll_controller(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);
		napi_schedule(&port->napi);
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}
#endif
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);

static const struct net_device_ops vnet_ops = {
	.ndo_open		= vnet_open,
	.ndo_stop		= vnet_close,
	.ndo_set_rx_mode	= vnet_set_rx_mode,
	.ndo_set_mac_address	= vnet_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= vnet_tx_timeout,
	.ndo_change_mtu		= vnet_change_mtu,
	.ndo_start_xmit		= vnet_start_xmit,
	.ndo_select_queue	= vnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= vnet_poll_controller,
#endif
};
static struct vnet *vnet_new(const u64 *local_mac)
{
	struct net_device *dev;
	struct vnet *vp;
	int err, i;

	dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	dev->needed_headroom = VNET_PACKET_SKIP + 8;
	dev->needed_tailroom = 8;

	/* Unpack the 6-byte MAC from the low 48 bits of the MD property,
	 * most significant byte first.
	 */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;

	vp = netdev_priv(dev);

	spin_lock_init(&vp->lock);
	vp->dev = dev;

	INIT_LIST_HEAD(&vp->port_list);
	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&vp->port_hash[i]);
	INIT_LIST_HEAD(&vp->list);
	vp->local_mac = *local_mac;

	dev->netdev_ops = &vnet_ops;
	dev->ethtool_ops = &vnet_ethtool_ops;
	dev->watchdog_timeo = VNET_TX_TIMEOUT;

	err = register_netdev(dev);
	if (err) {
		pr_err("Cannot register net device, aborting\n");
		goto err_out_free_dev;
	}

	netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);

	list_add(&vp->list, &vnet_list);

	return vp;

err_out_free_dev:
	free_netdev(dev);

	return ERR_PTR(err);
}
static struct vnet *vnet_find_or_create(const u64 *local_mac)
{
	struct vnet *iter, *vp;

	mutex_lock(&vnet_list_mutex);
	vp = NULL;
	list_for_each_entry(iter, &vnet_list, list) {
		if (iter->local_mac == *local_mac) {
			vp = iter;
			break;
		}
	}
	if (!vp)
		vp = vnet_new(local_mac);
	mutex_unlock(&vnet_list_mutex);

	return vp;
}
static void vnet_cleanup(void)
{
	struct vnet *vp;
	struct net_device *dev;

	mutex_lock(&vnet_list_mutex);
	while (!list_empty(&vnet_list)) {
		vp = list_first_entry(&vnet_list, struct vnet, list);
		list_del(&vp->list);
		dev = vp->dev;
		/* vio_unregister_driver() should have cleaned up port_list */
		BUG_ON(!list_empty(&vp->port_list));
		unregister_netdev(dev);
		free_netdev(dev);
	}
	mutex_unlock(&vnet_list_mutex);
}
static const char *local_mac_prop = "local-mac-address";

static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
				     u64 port_node)
{
	const u64 *local_mac = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(hp, a);
		const char *name;

		name = mdesc_get_property(hp, target, "name", NULL);
		if (!name || strcmp(name, "network"))
			continue;

		local_mac = mdesc_get_property(hp, target,
					       local_mac_prop, NULL);
		if (local_mac)
			break;
	}
	if (!local_mac)
		return ERR_PTR(-ENODEV);

	return vnet_find_or_create(local_mac);
}
static struct ldc_channel_config vnet_ldc_cfg = {
	.event		= vnet_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vnet_vio_ops = {
	.send_attr		= vnet_send_attr,
	.handle_attr		= vnet_handle_attr,
	.handshake_complete	= vnet_handshake_complete,
};

static void print_version(void)
{
	printk_once(KERN_INFO "%s", version);
}
const char *remote_macaddr_prop = "remote-mac-address";

static void
vnet_port_add_txq(struct vnet_port *port)
{
	struct vnet *vp = port->vp;
	int n;

	n = vp->nports++;
	n = n & (VNET_MAX_TXQS - 1);
	port->q_index = n;
	netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
}

static void
vnet_port_rm_txq(struct vnet_port *port)
{
	port->vp->nports--;
	netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
}
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vnet_port *port;
	unsigned long flags;
	struct vnet *vp;
	const u64 *rmac;
	int len, i, err, switch_port;

	print_version();

	hp = mdesc_grab();

	vp = vnet_find_parent(hp, vdev->mp);
	if (IS_ERR(vp)) {
		pr_err("Cannot find port parent vnet\n");
		err = PTR_ERR(vp);
		goto err_out_put_mdesc;
	}

	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
	err = -ENODEV;
	if (!rmac) {
		pr_err("Port lacks %s property\n", remote_macaddr_prop);
		goto err_out_put_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port)
		goto err_out_put_mdesc;

	for (i = 0; i < ETH_ALEN; i++)
		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;

	port->vp = vp;

	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
			      vnet_versions, ARRAY_SIZE(vnet_versions),
			      &vnet_vio_ops, vp->dev->name);
	if (err)
		goto err_out_free_port;

	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);

	err = vnet_port_alloc_tx_bufs(port);
	if (err)
		goto err_out_free_ldc;

	INIT_HLIST_NODE(&port->hash);
	INIT_LIST_HEAD(&port->list);

	switch_port = 0;
	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
		switch_port = 1;
	port->switch_port = switch_port;

	spin_lock_irqsave(&vp->lock, flags);
	if (switch_port)
		list_add_rcu(&port->list, &vp->port_list);
	else
		list_add_tail_rcu(&port->list, &vp->port_list);
	hlist_add_head_rcu(&port->hash,
			   &vp->port_hash[vnet_hashfn(port->raddr)]);
	vnet_port_add_txq(port);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	pr_info("%s: PORT ( remote-mac %pM%s )\n",
		vp->dev->name, port->raddr, switch_port ? " switch-port" : "");

	setup_timer(&port->clean_timer, vnet_clean_timer_expire,
		    (unsigned long)port);

	napi_enable(&port->napi);
	vio_port_up(&port->vio);

	mdesc_release(hp);

	return 0;

err_out_free_ldc:
	netif_napi_del(&port->napi);
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_put_mdesc:
	mdesc_release(hp);
	return err;
}
static int vnet_port_remove(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		del_timer_sync(&port->vio.timer);

		napi_disable(&port->napi);

		list_del_rcu(&port->list);
		hlist_del_rcu(&port->hash);

		synchronize_rcu();
		del_timer_sync(&port->clean_timer);
		vnet_port_rm_txq(port);
		netif_napi_del(&port->napi);
		vnet_port_free_tx_bufs(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}
static const struct vio_device_id vnet_port_match[] = {
	{
		.type = "vnet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vnet_port_match);

static struct vio_driver vnet_port_driver = {
	.id_table	= vnet_port_match,
	.probe		= vnet_port_probe,
	.remove		= vnet_port_remove,
	.name		= "vnet_port",
};

static int __init vnet_init(void)
{
	return vio_register_driver(&vnet_port_driver);
}

static void __exit vnet_exit(void)
{
	vio_unregister_driver(&vnet_port_driver);
	vnet_cleanup();
}

module_init(vnet_init);
module_exit(vnet_exit);