git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next...
authorDavid S. Miller <davem@davemloft.net>
Mon, 2 May 2016 03:38:49 +0000 (23:38 -0400)
committerDavid S. Miller <davem@davemloft.net>
Mon, 2 May 2016 03:38:49 +0000 (23:38 -0400)
Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-05-01

This series contains updates to i40e and i40evf.

The theme of this series is code reduction, with several code cleanups in
this series.  Starting with Neerav's removal of the code that implemented
the HMC AQ APIs and calls, since they are now obsolete and not supported
by firmware.

Anjali changes the default of VFs to make sure they are not trusted or
privileged until it's explicitly set for trust through the new NDO op
interface.  Also limited the number of MAC and VLAN addresses a VF can
add if it is untrusted/not privileged.

Carolyn syncs the VF code for the changes made to the PF for the RSS
hash tuple settings, which ends up cleaning up much of the existing code.

Jesse cleans up compiler warnings which were found with gcc's W=2 option.
Then removed duplicate code, especially since only one copy was actually
being used.

Jacob addresses an issue which was found when testing with GCC 6, which
happens to produce new warnings when you left shift a signed value
beyond the storage size of the type.  The series converts i40e & i40evf
to use the BIT() macro more consistently.

Alex actually bucks the trend of code removal by adding support for
both drivers to use GSO_PARTIAL so that segmentation of frames with
checksums enabled in outer headers is supported.  Fortunately it does
not take much to add this support!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/phy/mdio_bus.c
drivers/of/of_mdio.c
include/net/sctp/structs.h
net/sctp/sm_sideeffect.c
net/sctp/ulpqueue.c
net/tipc/node.c

index 499003ee8055455c9b5db9bfaaa45460a17aea73..388f9922647b3ef4db5a1b6f6562b7646ed4af25 100644 (file)
@@ -333,7 +333,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
                        struct phy_device *phydev;
 
                        phydev = mdiobus_scan(bus, i);
-                       if (IS_ERR(phydev)) {
+                       if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) {
                                err = PTR_ERR(phydev);
                                goto error;
                        }
index b622b33dbf93e1dc8742574ed614a3d107de9022..e051e1b57609e45afa170f1d92cde38c7beefe5e 100644 (file)
@@ -209,6 +209,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
        bool scanphys = false;
        int addr, rc;
 
+       /* Do not continue if the node is disabled */
+       if (!of_device_is_available(np))
+               return -ENODEV;
+
        /* Mask out all PHYs from auto probing.  Instead the PHYs listed in
         * the device tree are populated after the bus has been registered */
        mdio->phy_mask = ~0;
index 558bae3cbe0d5107d52c8cb31b324cfd5479def0..16b013a6191cf1c416e4dd1aeb1707a8569ea49b 100644 (file)
@@ -218,7 +218,7 @@ struct sctp_sock {
                frag_interleave:1,
                recvrcvinfo:1,
                recvnxtinfo:1,
-               pending_data_ready:1;
+               data_ready_signalled:1;
 
        atomic_t pd_mode;
        /* Receive to here while partial delivery is in effect. */
index e8f0112f9b28472c39c4c91dcb28576373c858e7..aa37122593684d8501fdca15983fbd8620fabe07 100644 (file)
@@ -1741,10 +1741,9 @@ out:
        } else if (local_cork)
                error = sctp_outq_uncork(&asoc->outqueue, gfp);
 
-       if (sp->pending_data_ready) {
-               sk->sk_data_ready(sk);
-               sp->pending_data_ready = 0;
-       }
+       if (sp->data_ready_signalled)
+               sp->data_ready_signalled = 0;
+
        return error;
 nomem:
        error = -ENOMEM;
index ec12a8920e5fd7a0f26d19f1695bc2feeae41518..ec166d2bd2d95d9aa69369da2ead9437da4ce8ed 100644 (file)
@@ -194,6 +194,7 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
        struct sock *sk = ulpq->asoc->base.sk;
+       struct sctp_sock *sp = sctp_sk(sk);
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;
@@ -211,7 +212,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
                sk_incoming_cpu_update(sk);
        }
        /* Check if the user wishes to receive this event.  */
-       if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
+       if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
                goto out_free;
 
        /* If we are in partial delivery mode, post to the lobby until
@@ -219,7 +220,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
         * the association the cause of the partial delivery.
         */
 
-       if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
+       if (atomic_read(&sp->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
@@ -231,7 +232,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
-                               queue = &sctp_sk(sk)->pd_lobby;
+                               queue = &sp->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
@@ -242,10 +243,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
-                       if (sctp_sk(sk)->frag_interleave)
+                       if (sp->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
-                               queue = &sctp_sk(sk)->pd_lobby;
+                               queue = &sp->pd_lobby;
                }
        }
 
@@ -264,8 +265,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);
 
-       if (queue == &sk->sk_receive_queue)
-               sctp_sk(sk)->pending_data_ready = 1;
+       if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
+               sp->data_ready_signalled = 1;
+               sk->sk_data_ready(sk);
+       }
        return 1;
 
 out_free:
@@ -1126,11 +1129,13 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;
+       struct sctp_sock *sp;
 
        if (!ulpq->pd_mode)
                return;
 
        sk = ulpq->asoc->base.sk;
+       sp = sctp_sk(sk);
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
@@ -1140,6 +1145,8 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
        /* If there is data waiting, send it up the socket now. */
-       if (sctp_ulpq_clear_pd(ulpq) || ev)
-               sctp_sk(sk)->pending_data_ready = 1;
+       if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
+               sp->data_ready_signalled = 1;
+               sk->sk_data_ready(sk);
+       }
 }
index 68d9f7b8485c5eebcf2781a2b5e7f0e64adbc2da..c299156882307053ec5322edb94e691422596c77 100644 (file)
@@ -554,6 +554,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
                *slot1 = bearer_id;
                tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
                n->action_flags |= TIPC_NOTIFY_NODE_UP;
+               tipc_link_set_active(nl, true);
                tipc_bcast_add_peer(n->net, nl, xmitq);
                return;
        }