Merge tag 'linux-can-next-for-4.1-20150323' of git://git.kernel.org/pub/scm/linux...
author	David S. Miller <davem@davemloft.net>
	Tue, 24 Mar 2015 02:03:43 +0000 (22:03 -0400)
committer	David S. Miller <davem@davemloft.net>
	Tue, 24 Mar 2015 02:03:43 +0000 (22:03 -0400)
Marc Kleine-Budde says:

====================
pull-request: can-next 2015-03-23

this is a pull request of 6 patches for net-next/master.

A patch by Florian Westphal converts the skb->destructor to use
sock_efree() instead of its own destructor. Ahmed S. Darwish's patch
converts the kvaser_usb driver to use unregister_candev(). A patch by
me removes a return from a void function in the m_can driver. Yegor
Yefremov contributes a patch for combined rx/tx LED trigger support. A
sparse warning in the esd_usb2 driver was fixed by Thomas Körper. Ben
Dooks converts the at91_can driver to use endian-agnostic IO accessors.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
41 files changed:
Documentation/networking/packet_mmap.txt
crypto/af_alg.c
crypto/algif_skcipher.c
drivers/isdn/gigaset/ev-layer.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bgmac.h
drivers/net/vxlan.c
include/crypto/if_alg.h
include/linux/dccp.h
include/linux/netfilter_bridge.h
include/linux/socket.h
include/net/request_sock.h
include/net/tcp.h
include/uapi/linux/if_packet.h
net/bridge/br_netfilter.c
net/compat.c
net/core/dev.c
net/core/request_sock.c
net/core/sock.c
net/dccp/dccp.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/ipv4/fib_trie.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_output.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_timer.c
net/ipv6/inet6_connection_sock.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/tcp_ipv6.c
net/netfilter/nf_conntrack_acct.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nft_rbtree.c
net/netfilter/xt_physdev.c
net/packet/af_packet.c
net/socket.c
net/switchdev/switchdev.c

index a6d7cb91069e207b24bae3a2f4e2925475639640..daa015af16a092a8d4b7fd1df73f2bbe282251d9 100644 (file)
@@ -440,9 +440,10 @@ and the following flags apply:
 +++ Capture process:
      from include/linux/if_packet.h
 
-     #define TP_STATUS_COPY          2 
-     #define TP_STATUS_LOSING        4 
-     #define TP_STATUS_CSUMNOTREADY  8 
+     #define TP_STATUS_COPY          (1 << 1)
+     #define TP_STATUS_LOSING        (1 << 2)
+     #define TP_STATUS_CSUMNOTREADY  (1 << 3)
+     #define TP_STATUS_CSUM_VALID    (1 << 7)
 
 TP_STATUS_COPY        : This flag indicates that the frame (and associated
                         meta information) has been truncated because it's 
@@ -466,6 +467,12 @@ TP_STATUS_CSUMNOTREADY: currently it's used for outgoing IP packets which
                         reading the packet we should not try to check the 
                         checksum. 
 
+TP_STATUS_CSUM_VALID  : This flag indicates that at least the transport
+                        header checksum of the packet has been already
+                        validated on the kernel side. If the flag is not set
+                        then we are free to check the checksum by ourselves
+                        provided that TP_STATUS_CSUMNOTREADY is also not set.
+
 for convenience there are also the following defines:
 
      #define TP_STATUS_KERNEL        0
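
A minimal sketch of how a capture process might act on the new flag
(assuming a mapped RX ring and the struct tpacket_hdr layout from
include/uapi/linux/if_packet.h; ring setup omitted):

     struct tpacket_hdr *hdr = frame;   /* frame: current ring slot */

     if (hdr->tp_status & TP_STATUS_CSUM_VALID) {
             /* kernel already validated (at least) the transport checksum */
     } else if (!(hdr->tp_status & TP_STATUS_CSUMNOTREADY)) {
             /* checksum present and untouched: verify it ourselves */
     } else {
             /* outgoing packet, checksum not computed yet: skip the check */
     }
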
index 7f8b7edcadca3f64168e3451e52642a485a5d51f..26089d182cb70086f8278d05f15f07bafa4e3ab1 100644 (file)
@@ -358,8 +358,8 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
        npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (WARN_ON(npages == 0))
                return -EINVAL;
-
-       sg_init_table(sgl->sg, npages);
+       /* Add one extra for linking */
+       sg_init_table(sgl->sg, npages + 1);
 
        for (i = 0, len = n; i < npages; i++) {
                int plen = min_t(int, len, PAGE_SIZE - off);
@@ -369,18 +369,26 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
                off = 0;
                len -= plen;
        }
+       sg_mark_end(sgl->sg + npages - 1);
+       sgl->npages = npages;
+
        return n;
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);
 
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
+{
+       sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+       sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+}
+EXPORT_SYMBOL(af_alg_link_sg);
+
 void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
        int i;
 
-       i = 0;
-       do {
+       for (i = 0; i < sgl->npages; i++)
                put_page(sgl->pages[i]);
-       } while (!sg_is_last(sgl->sg + (i++)));
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);
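
The extra scatterlist entry that af_alg_make_sg() now reserves is what
af_alg_link_sg() later turns into a chain link, leaving the data entries
untouched. A kernel-context sketch of the pattern (sizes hypothetical):

     struct scatterlist a[4 + 1], b[2 + 1];  /* one spare slot each */

     sg_init_table(a, 4 + 1);
     sg_init_table(b, 2 + 1);
     /* ... fill a[0..3] and b[0..1] with sg_set_page() ... */
     sg_mark_end(&a[3]);         /* a is complete on its own */

     /* append b to a, mirroring af_alg_link_sg(): */
     sg_unmark_end(&a[3]);
     sg_chain(a, 4 + 1, b);      /* the spare a[4] becomes the link to b */
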
 
index b9743dc35801e0f7c6a5de1dcb22bc5699ac1047..8276f21ea7beac7c37a43983b65ddd5b9c0e3d98 100644 (file)
@@ -39,6 +39,7 @@ struct skcipher_ctx {
 
        struct af_alg_completion completion;
 
+       atomic_t inflight;
        unsigned used;
 
        unsigned int len;
@@ -49,9 +50,65 @@ struct skcipher_ctx {
        struct ablkcipher_request req;
 };
 
+struct skcipher_async_rsgl {
+       struct af_alg_sgl sgl;
+       struct list_head list;
+};
+
+struct skcipher_async_req {
+       struct kiocb *iocb;
+       struct skcipher_async_rsgl first_sgl;
+       struct list_head list;
+       struct scatterlist *tsg;
+       char iv[];
+};
+
+#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
+       crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
+
+#define GET_REQ_SIZE(ctx) \
+       crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
+#define GET_IV_SIZE(ctx) \
+       crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
                      sizeof(struct scatterlist) - 1)
 
+static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
+{
+       struct skcipher_async_rsgl *rsgl, *tmp;
+       struct scatterlist *sgl;
+       struct scatterlist *sg;
+       int i, n;
+
+       list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
+               af_alg_free_sg(&rsgl->sgl);
+               if (rsgl != &sreq->first_sgl)
+                       kfree(rsgl);
+       }
+       sgl = sreq->tsg;
+       n = sg_nents(sgl);
+       for_each_sg(sgl, sg, n, i)
+               put_page(sg_page(sg));
+
+       kfree(sreq->tsg);
+}
+
+static void skcipher_async_cb(struct crypto_async_request *req, int err)
+{
+       struct sock *sk = req->data;
+       struct alg_sock *ask = alg_sk(sk);
+       struct skcipher_ctx *ctx = ask->private;
+       struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+       struct kiocb *iocb = sreq->iocb;
+
+       atomic_dec(&ctx->inflight);
+       skcipher_free_async_sgls(sreq);
+       kfree(req);
+       aio_complete(iocb, err, err);
+}
+
 static inline int skcipher_sndbuf(struct sock *sk)
 {
        struct alg_sock *ask = alg_sk(sk);
@@ -96,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
        return 0;
 }
 
-static void skcipher_pull_sgl(struct sock *sk, int used)
+static void skcipher_pull_sgl(struct sock *sk, int used, int put)
 {
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
@@ -123,8 +180,8 @@ static void skcipher_pull_sgl(struct sock *sk, int used)
 
                        if (sg[i].length)
                                return;
-
-                       put_page(sg_page(sg + i));
+                       if (put)
+                               put_page(sg_page(sg + i));
                        sg_assign_page(sg + i, NULL);
                }
 
@@ -143,7 +200,7 @@ static void skcipher_free_sgl(struct sock *sk)
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
 
-       skcipher_pull_sgl(sk, ctx->used);
+       skcipher_pull_sgl(sk, ctx->used, 1);
 }
 
 static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
@@ -424,8 +481,149 @@ unlock:
        return err ?: size;
 }
 
-static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
-                           size_t ignored, int flags)
+static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
+{
+       struct skcipher_sg_list *sgl;
+       struct scatterlist *sg;
+       int nents = 0;
+
+       list_for_each_entry(sgl, &ctx->tsgl, list) {
+               sg = sgl->sg;
+
+               while (!sg->length)
+                       sg++;
+
+               nents += sg_nents(sg);
+       }
+       return nents;
+}
+
+static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
+                                 int flags)
+{
+       struct sock *sk = sock->sk;
+       struct alg_sock *ask = alg_sk(sk);
+       struct skcipher_ctx *ctx = ask->private;
+       struct skcipher_sg_list *sgl;
+       struct scatterlist *sg;
+       struct skcipher_async_req *sreq;
+       struct ablkcipher_request *req;
+       struct skcipher_async_rsgl *last_rsgl = NULL;
+       unsigned int len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+       unsigned int reqlen = sizeof(struct skcipher_async_req) +
+                               GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+       int i = 0;
+       int err = -ENOMEM;
+
+       lock_sock(sk);
+       req = kmalloc(reqlen, GFP_KERNEL);
+       if (unlikely(!req))
+               goto unlock;
+
+       sreq = GET_SREQ(req, ctx);
+       sreq->iocb = msg->msg_iocb;
+       memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
+       INIT_LIST_HEAD(&sreq->list);
+       sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
+       if (unlikely(!sreq->tsg)) {
+               kfree(req);
+               goto unlock;
+       }
+       sg_init_table(sreq->tsg, tx_nents);
+       memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
+       ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
+       ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                       skcipher_async_cb, sk);
+
+       while (iov_iter_count(&msg->msg_iter)) {
+               struct skcipher_async_rsgl *rsgl;
+               unsigned long used;
+
+               if (!ctx->used) {
+                       err = skcipher_wait_for_data(sk, flags);
+                       if (err)
+                               goto free;
+               }
+               sgl = list_first_entry(&ctx->tsgl,
+                                      struct skcipher_sg_list, list);
+               sg = sgl->sg;
+
+               while (!sg->length)
+                       sg++;
+
+               used = min_t(unsigned long, ctx->used,
+                            iov_iter_count(&msg->msg_iter));
+               used = min_t(unsigned long, used, sg->length);
+
+               if (i == tx_nents) {
+                       struct scatterlist *tmp;
+                       int x;
+                       /* Ran out of tx slots in async request
+                        * need to expand */
+                       tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
+                                     GFP_KERNEL);
+                       if (!tmp)
+                               goto free;
+
+                       sg_init_table(tmp, tx_nents * 2);
+                       for (x = 0; x < tx_nents; x++)
+                               sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
+                                           sreq->tsg[x].length,
+                                           sreq->tsg[x].offset);
+                       kfree(sreq->tsg);
+                       sreq->tsg = tmp;
+                       tx_nents *= 2;
+               }
+               /* Need to take over the tx sgl from ctx
+                * to the asynch req - these sgls will be freed later */
+               sg_set_page(sreq->tsg + i++, sg_page(sg), sg->length,
+                           sg->offset);
+
+               if (list_empty(&sreq->list)) {
+                       rsgl = &sreq->first_sgl;
+                       list_add_tail(&rsgl->list, &sreq->list);
+               } else {
+                       rsgl = kzalloc(sizeof(*rsgl), GFP_KERNEL);
+                       if (!rsgl) {
+                               err = -ENOMEM;
+                               goto free;
+                       }
+                       list_add_tail(&rsgl->list, &sreq->list);
+               }
+
+               used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
+               err = used;
+               if (used < 0)
+                       goto free;
+               if (last_rsgl)
+                       af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+               last_rsgl = rsgl;
+               len += used;
+               skcipher_pull_sgl(sk, used, 0);
+               iov_iter_advance(&msg->msg_iter, used);
+       }
+
+       ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
+                                    len, sreq->iv);
+       err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
+                        crypto_ablkcipher_decrypt(req);
+       if (err == -EINPROGRESS) {
+               atomic_inc(&ctx->inflight);
+               err = -EIOCBQUEUED;
+               goto unlock;
+       }
+free:
+       skcipher_free_async_sgls(sreq);
+       kfree(req);
+unlock:
+       skcipher_wmem_wakeup(sk);
+       release_sock(sk);
+       return err;
+}
+
+static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
+                                int flags)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
@@ -484,7 +682,7 @@ free:
                        goto unlock;
 
                copied += used;
-               skcipher_pull_sgl(sk, used);
+               skcipher_pull_sgl(sk, used, 1);
                iov_iter_advance(&msg->msg_iter, used);
        }
 
@@ -497,6 +695,13 @@ unlock:
        return copied ?: err;
 }
 
+static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+                           size_t ignored, int flags)
+{
+       return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
+               skcipher_recvmsg_async(sock, msg, flags) :
+               skcipher_recvmsg_sync(sock, msg, flags);
+}
 
 static unsigned int skcipher_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
@@ -555,12 +760,25 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
        return crypto_ablkcipher_setkey(private, key, keylen);
 }
 
+static void skcipher_wait(struct sock *sk)
+{
+       struct alg_sock *ask = alg_sk(sk);
+       struct skcipher_ctx *ctx = ask->private;
+       int ctr = 0;
+
+       while (atomic_read(&ctx->inflight) && ctr++ < 100)
+               msleep(100);
+}
+
 static void skcipher_sock_destruct(struct sock *sk)
 {
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
 
+       if (atomic_read(&ctx->inflight))
+               skcipher_wait(sk);
+
        skcipher_free_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
        sock_kfree_s(sk, ctx, ctx->len);
@@ -592,6 +810,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
+       atomic_set(&ctx->inflight, 0);
        af_alg_init_completion(&ctx->completion);
 
        ask->private = ctx;
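
The msg_iocb dispatch added above means an aio read on the operation
socket reaches skcipher_recvmsg() with msg->msg_iocb set and is routed to
skcipher_recvmsg_async(). A minimal userspace sketch of exercising that
path (libaio assumed, link with -laio; key/IV setup is simplified and
error handling omitted, so this is an illustration, not a reference
client):

     #include <libaio.h>
     #include <linux/if_alg.h>
     #include <sys/socket.h>
     #include <unistd.h>

     #ifndef SOL_ALG
     #define SOL_ALG 279
     #endif

     int main(void)
     {
             struct sockaddr_alg sa = {
                     .salg_family = AF_ALG,
                     .salg_type   = "skcipher",
                     .salg_name   = "cbc(aes)",
             };
             char key[16] = { 0 }, buf[16] = { 0 };
             struct iocb cb, *cbs[1] = { &cb };
             struct io_event ev;
             io_context_t ctx = 0;
             int tfmfd, opfd;

             tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
             bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
             setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
             opfd = accept(tfmfd, NULL, 0);

             /* queue one block of input; a real client would also send
              * ALG_SET_OP/ALG_SET_IV as control messages */
             send(opfd, buf, sizeof(buf), 0);

             /* the aio read carries a kiocb, selecting the async path */
             io_setup(1, &ctx);
             io_prep_pread(&cb, opfd, buf, sizeof(buf), 0);
             io_submit(ctx, 1, cbs);
             io_getevents(ctx, 1, 1, &ev, NULL);  /* wait for completion */
             io_destroy(ctx);

             close(opfd);
             close(tfmfd);
             return 0;
     }
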
index c8ced12fa45276d365db89427273cc7de17ba322..1cfcea62aed995d701cf2f11bd882d8aae6e1407 100644 (file)
@@ -389,22 +389,49 @@ zsau_resp[] =
        {NULL,                          ZSAU_UNKNOWN}
 };
 
-/* retrieve CID from parsed response
- * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535
+/* check for and remove fixed string prefix
+ * If s starts with prefix terminated by a non-alphanumeric character,
+ * return pointer to the first character after that, otherwise return NULL.
  */
-static int cid_of_response(char *s)
+static char *skip_prefix(char *s, const char *prefix)
 {
-       int cid;
-       int rc;
-
-       if (s[-1] != ';')
-               return 0;       /* no CID separator */
-       rc = kstrtoint(s, 10, &cid);
-       if (rc)
-               return 0;       /* CID not numeric */
-       if (cid < 1 || cid > 65535)
-               return -1;      /* CID out of range */
-       return cid;
+       while (*prefix)
+               if (*s++ != *prefix++)
+                       return NULL;
+       if (isalnum(*s))
+               return NULL;
+       return s;
+}
+
+/* queue event with CID */
+static void add_cid_event(struct cardstate *cs, int cid, int type,
+                         void *ptr, int parameter)
+{
+       unsigned long flags;
+       unsigned next, tail;
+       struct event_t *event;
+
+       gig_dbg(DEBUG_EVENT, "queueing event %d for cid %d", type, cid);
+
+       spin_lock_irqsave(&cs->ev_lock, flags);
+
+       tail = cs->ev_tail;
+       next = (tail + 1) % MAX_EVENTS;
+       if (unlikely(next == cs->ev_head)) {
+               dev_err(cs->dev, "event queue full\n");
+               kfree(ptr);
+       } else {
+               event = cs->events + tail;
+               event->type = type;
+               event->cid = cid;
+               event->ptr = ptr;
+               event->arg = NULL;
+               event->parameter = parameter;
+               event->at_state = NULL;
+               cs->ev_tail = next;
+       }
+
+       spin_unlock_irqrestore(&cs->ev_lock, flags);
 }
 
 /**
@@ -417,190 +444,188 @@ static int cid_of_response(char *s)
  */
 void gigaset_handle_modem_response(struct cardstate *cs)
 {
-       unsigned char *argv[MAX_REC_PARAMS + 1];
-       int params;
-       int i, j;
+       char *eoc, *psep, *ptr;
        const struct resp_type_t *rt;
        const struct zsau_resp_t *zr;
-       int curarg;
-       unsigned long flags;
-       unsigned next, tail, head;
-       struct event_t *event;
-       int resp_code;
-       int param_type;
-       int abort;
-       size_t len;
-       int cid;
-       int rawstring;
-
-       len = cs->cbytes;
-       if (!len) {
+       int cid, parameter;
+       u8 type, value;
+
+       if (!cs->cbytes) {
                /* ignore additional LFs/CRs (M10x config mode or cx100) */
                gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
                return;
        }
-       cs->respdata[len] = 0;
-       argv[0] = cs->respdata;
-       params = 1;
+       cs->respdata[cs->cbytes] = 0;
+
        if (cs->at_state.getstring) {
-               /* getstring only allowed without cid at the moment */
+               /* state machine wants next line verbatim */
                cs->at_state.getstring = 0;
-               rawstring = 1;
-               cid = 0;
-       } else {
-               /* parse line */
-               for (i = 0; i < len; i++)
-                       switch (cs->respdata[i]) {
-                       case ';':
-                       case ',':
-                       case '=':
-                               if (params > MAX_REC_PARAMS) {
-                                       dev_warn(cs->dev,
-                                                "too many parameters in response\n");
-                                       /* need last parameter (might be CID) */
-                                       params--;
-                               }
-                               argv[params++] = cs->respdata + i + 1;
-                       }
-
-               rawstring = 0;
-               cid = params > 1 ? cid_of_response(argv[params - 1]) : 0;
-               if (cid < 0) {
-                       gigaset_add_event(cs, &cs->at_state, RSP_INVAL,
-                                         NULL, 0, NULL);
-                       return;
-               }
+               ptr = kstrdup(cs->respdata, GFP_ATOMIC);
+               gig_dbg(DEBUG_EVENT, "string==%s", ptr ? ptr : "NULL");
+               add_cid_event(cs, 0, RSP_STRING, ptr, 0);
+               return;
+       }
 
-               for (j = 1; j < params; ++j)
-                       argv[j][-1] = 0;
+       /* look up response type */
+       for (rt = resp_type; rt->response; ++rt) {
+               eoc = skip_prefix(cs->respdata, rt->response);
+               if (eoc)
+                       break;
+       }
+       if (!rt->response) {
+               add_cid_event(cs, 0, RSP_NONE, NULL, 0);
+               gig_dbg(DEBUG_EVENT, "unknown modem response: '%s'\n",
+                       cs->respdata);
+               return;
+       }
 
-               gig_dbg(DEBUG_EVENT, "CMD received: %s", argv[0]);
-               if (cid) {
-                       --params;
-                       gig_dbg(DEBUG_EVENT, "CID: %s", argv[params]);
-               }
-               gig_dbg(DEBUG_EVENT, "available params: %d", params - 1);
-               for (j = 1; j < params; j++)
-                       gig_dbg(DEBUG_EVENT, "param %d: %s", j, argv[j]);
+       /* check for CID */
+       psep = strrchr(cs->respdata, ';');
+       if (psep &&
+           !kstrtoint(psep + 1, 10, &cid) &&
+           cid >= 1 && cid <= 65535) {
+               /* valid CID: chop it off */
+               *psep = 0;
+       } else {
+               /* no valid CID: leave unchanged */
+               cid = 0;
        }
 
-       spin_lock_irqsave(&cs->ev_lock, flags);
-       head = cs->ev_head;
-       tail = cs->ev_tail;
+       gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata);
+       if (cid)
+               gig_dbg(DEBUG_EVENT, "CID: %d", cid);
 
-       abort = 1;
-       curarg = 0;
-       while (curarg < params) {
-               next = (tail + 1) % MAX_EVENTS;
-               if (unlikely(next == head)) {
-                       dev_err(cs->dev, "event queue full\n");
-                       break;
-               }
+       switch (rt->type) {
+       case RT_NOTHING:
+               /* check parameter separator */
+               if (*eoc)
+                       goto bad_param; /* extra parameter */
 
-               event = cs->events + tail;
-               event->at_state = NULL;
-               event->cid = cid;
-               event->ptr = NULL;
-               event->arg = NULL;
-               tail = next;
+               add_cid_event(cs, cid, rt->resp_code, NULL, 0);
+               break;
 
-               if (rawstring) {
-                       resp_code = RSP_STRING;
-                       param_type = RT_STRING;
-               } else {
-                       for (rt = resp_type; rt->response; ++rt)
-                               if (!strcmp(argv[curarg], rt->response))
+       case RT_RING:
+               /* check parameter separator */
+               if (!*eoc)
+                       eoc = NULL;     /* no parameter */
+               else if (*eoc++ != ',')
+                       goto bad_param;
+
+               add_cid_event(cs, 0, rt->resp_code, NULL, cid);
+
+               /* process parameters as individual responses */
+               while (eoc) {
+                       /* look up parameter type */
+                       psep = NULL;
+                       for (rt = resp_type; rt->response; ++rt) {
+                               psep = skip_prefix(eoc, rt->response);
+                               if (psep)
                                        break;
+                       }
 
-                       if (!rt->response) {
-                               event->type = RSP_NONE;
-                               gig_dbg(DEBUG_EVENT,
-                                       "unknown modem response: '%s'\n",
-                                       argv[curarg]);
-                               break;
+                       /* all legal parameters are of type RT_STRING */
+                       if (!psep || rt->type != RT_STRING) {
+                               dev_warn(cs->dev,
+                                        "illegal RING parameter: '%s'\n",
+                                        eoc);
+                               return;
                        }
 
-                       resp_code = rt->resp_code;
-                       param_type = rt->type;
-                       ++curarg;
-               }
+                       /* skip parameter value separator */
+                       if (*psep++ != '=')
+                               goto bad_param;
 
-               event->type = resp_code;
+                       /* look up end of parameter */
+                       eoc = strchr(psep, ',');
+                       if (eoc)
+                               *eoc++ = 0;
 
-               switch (param_type) {
-               case RT_NOTHING:
-                       break;
-               case RT_RING:
-                       if (!cid) {
-                               dev_err(cs->dev,
-                                       "received RING without CID!\n");
-                               event->type = RSP_INVAL;
-                               abort = 1;
-                       } else {
-                               event->cid = 0;
-                               event->parameter = cid;
-                               abort = 0;
-                       }
+                       /* retrieve parameter value */
+                       ptr = kstrdup(psep, GFP_ATOMIC);
+
+                       /* queue event */
+                       add_cid_event(cs, cid, rt->resp_code, ptr, 0);
+               }
+               break;
+
+       case RT_ZSAU:
+               /* check parameter separator */
+               if (!*eoc) {
+                       /* no parameter */
+                       add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
                        break;
-               case RT_ZSAU:
-                       if (curarg >= params) {
-                               event->parameter = ZSAU_NONE;
+               }
+               if (*eoc++ != '=')
+                       goto bad_param;
+
+               /* look up parameter value */
+               for (zr = zsau_resp; zr->str; ++zr)
+                       if (!strcmp(eoc, zr->str))
                                break;
-                       }
-                       for (zr = zsau_resp; zr->str; ++zr)
-                               if (!strcmp(argv[curarg], zr->str))
-                                       break;
-                       event->parameter = zr->code;
-                       if (!zr->str)
-                               dev_warn(cs->dev,
-                                        "%s: unknown parameter %s after ZSAU\n",
-                                        __func__, argv[curarg]);
-                       ++curarg;
-                       break;
-               case RT_STRING:
-                       if (curarg < params) {
-                               event->ptr = kstrdup(argv[curarg], GFP_ATOMIC);
-                               if (!event->ptr)
-                                       dev_err(cs->dev, "out of memory\n");
-                               ++curarg;
-                       }
-                       gig_dbg(DEBUG_EVENT, "string==%s",
-                               event->ptr ? (char *) event->ptr : "NULL");
-                       break;
-               case RT_ZCAU:
-                       event->parameter = -1;
-                       if (curarg + 1 < params) {
-                               u8 type, value;
-
-                               i = kstrtou8(argv[curarg++], 16, &type);
-                               j = kstrtou8(argv[curarg++], 16, &value);
-                               if (i == 0 && j == 0)
-                                       event->parameter = (type << 8) | value;
-                       } else
-                               curarg = params - 1;
-                       break;
-               case RT_NUMBER:
-                       if (curarg >= params ||
-                           kstrtoint(argv[curarg++], 10, &event->parameter))
-                               event->parameter = -1;
-                       gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter);
-                       break;
+               if (!zr->str)
+                       goto bad_param;
+
+               add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
+               break;
+
+       case RT_STRING:
+               /* check parameter separator */
+               if (*eoc++ != '=')
+                       goto bad_param;
+
+               /* retrieve parameter value */
+               ptr = kstrdup(eoc, GFP_ATOMIC);
+
+               /* queue event */
+               add_cid_event(cs, cid, rt->resp_code, ptr, 0);
+               break;
+
+       case RT_ZCAU:
+               /* check parameter separators */
+               if (*eoc++ != '=')
+                       goto bad_param;
+               psep = strchr(eoc, ',');
+               if (!psep)
+                       goto bad_param;
+               *psep++ = 0;
+
+               /* decode parameter values */
+               if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) {
+                       *--psep = ',';
+                       goto bad_param;
                }
+               parameter = (type << 8) | value;
 
-               if (resp_code == RSP_ZDLE)
-                       cs->dle = event->parameter;
+               add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
+               break;
 
-               if (abort)
-                       break;
-       }
+       case RT_NUMBER:
+               /* check parameter separator */
+               if (*eoc++ != '=')
+                       goto bad_param;
 
-       cs->ev_tail = tail;
-       spin_unlock_irqrestore(&cs->ev_lock, flags);
+               /* decode parameter value */
+               if (kstrtoint(eoc, 10, &parameter))
+                       goto bad_param;
+
+               /* special case ZDLE: set flag before queueing event */
+               if (rt->resp_code == RSP_ZDLE)
+                       cs->dle = parameter;
 
-       if (curarg != params)
-               gig_dbg(DEBUG_EVENT,
-                       "invalid number of processed parameters: %d/%d",
-                       curarg, params);
+               add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
+               break;
+
+bad_param:
+               /* parameter unexpected, incomplete or malformed */
+               dev_warn(cs->dev, "bad parameter in response '%s'\n",
+                        cs->respdata);
+               add_cid_event(cs, cid, rt->resp_code, NULL, -1);
+               break;
+
+       default:
+               dev_err(cs->dev, "%s: internal error on '%s'\n",
+                       __func__, cs->respdata);
+       }
 }
 EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
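
The skip_prefix() helper introduced above only matches when the prefix is
followed by a non-alphanumeric character, which is what lets it anchor on
whole response keywords. For illustration (hypothetical inputs):

     char line[] = "ZSAU=OUTGOING_CALL_PROCEEDING";

     skip_prefix(line, "ZSAU");  /* -> "=OUTGOING_CALL_PROCEEDING" */
     skip_prefix(line, "ZSA");   /* -> NULL: next char 'U' is alphanumeric */
     skip_prefix(line, "ZSB");   /* -> NULL: prefix does not match */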
 
index efda7cf82394ba422901273b61aebda081cbba84..fa8f9e147c34b6ed2c49e4c7679cfb9bcac34298 100644 (file)
@@ -115,53 +115,91 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
        bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
 }
 
+static void
+bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+                    int i, int len, u32 ctl0)
+{
+       struct bgmac_slot_info *slot;
+       struct bgmac_dma_desc *dma_desc;
+       u32 ctl1;
+
+       if (i == ring->num_slots - 1)
+               ctl0 |= BGMAC_DESC_CTL0_EOT;
+
+       ctl1 = len & BGMAC_DESC_CTL1_LEN;
+
+       slot = &ring->slots[i];
+       dma_desc = &ring->cpu_base[i];
+       dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
+       dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
+       dma_desc->ctl0 = cpu_to_le32(ctl0);
+       dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
                                    struct bgmac_dma_ring *ring,
                                    struct sk_buff *skb)
 {
        struct device *dma_dev = bgmac->core->dma_dev;
        struct net_device *net_dev = bgmac->net_dev;
-       struct bgmac_dma_desc *dma_desc;
-       struct bgmac_slot_info *slot;
-       u32 ctl0, ctl1;
+       struct bgmac_slot_info *slot = &ring->slots[ring->end];
        int free_slots;
+       int nr_frags;
+       u32 flags;
+       int index = ring->end;
+       int i;
 
        if (skb->len > BGMAC_DESC_CTL1_LEN) {
                bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
-               goto err_stop_drop;
+               goto err_drop;
        }
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb_checksum_help(skb);
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+
        if (ring->start <= ring->end)
                free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
        else
                free_slots = ring->start - ring->end;
-       if (free_slots == 1) {
+
+       if (free_slots <= nr_frags + 1) {
                bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
                netif_stop_queue(net_dev);
                return NETDEV_TX_BUSY;
        }
 
-       slot = &ring->slots[ring->end];
-       slot->skb = skb;
-       slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
+       slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
                                        DMA_TO_DEVICE);
-       if (dma_mapping_error(dma_dev, slot->dma_addr)) {
-               bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
-                         ring->mmio_base);
-               goto err_stop_drop;
-       }
+       if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+               goto err_dma_head;
 
-       ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
-       if (ring->end == ring->num_slots - 1)
-               ctl0 |= BGMAC_DESC_CTL0_EOT;
-       ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
+       flags = BGMAC_DESC_CTL0_SOF;
+       if (!nr_frags)
+               flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
 
-       dma_desc = ring->cpu_base;
-       dma_desc += ring->end;
-       dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
-       dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
-       dma_desc->ctl0 = cpu_to_le32(ctl0);
-       dma_desc->ctl1 = cpu_to_le32(ctl1);
+       bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
+       flags = 0;
+
+       for (i = 0; i < nr_frags; i++) {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               int len = skb_frag_size(frag);
+
+               index = (index + 1) % BGMAC_TX_RING_SLOTS;
+               slot = &ring->slots[index];
+               slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
+                                                 len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+                       goto err_dma;
+
+               if (i == nr_frags - 1)
+                       flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
+
+               bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
+       }
+
+       slot->skb = skb;
 
        netdev_sent_queue(net_dev, skb->len);
 
@@ -170,20 +208,35 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
        /* Increase ring->end to point empty slot. We tell hardware the first
         * slot it should *not* read.
         */
-       if (++ring->end >= BGMAC_TX_RING_SLOTS)
-               ring->end = 0;
+       ring->end = (index + 1) % BGMAC_TX_RING_SLOTS;
        bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
                    ring->index_base +
                    ring->end * sizeof(struct bgmac_dma_desc));
 
-       /* Always keep one slot free to allow detecting bugged calls. */
-       if (--free_slots == 1)
+       free_slots -= nr_frags + 1;
+       if (free_slots < 8)
                netif_stop_queue(net_dev);
 
        return NETDEV_TX_OK;
 
-err_stop_drop:
-       netif_stop_queue(net_dev);
+err_dma:
+       dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
+                        DMA_TO_DEVICE);
+
+       while (i > 0) {
+               int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
+               struct bgmac_slot_info *slot = &ring->slots[index];
+               u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
+               int len = ctl1 & BGMAC_DESC_CTL1_LEN;
+
+               dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
+       }
+
+err_dma_head:
+       bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
+                 ring->mmio_base);
+
+err_drop:
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
 }
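
The free-slot computation above treats ring->start as the oldest in-flight
slot and ring->end as the first slot the hardware must not read; restated
as a standalone (hypothetical) helper:

     static int ring_free_slots(int start, int end, int num_slots)
     {
             if (start <= end)   /* end has not wrapped around start */
                     return start - end + num_slots;
             return start - end; /* end already wrapped */
     }

With start == end the ring is empty and all num_slots slots are free,
which is why the fragment path bails out while free_slots <= nr_frags + 1.
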
@@ -205,32 +258,45 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 
        while (ring->start != empty_slot) {
                struct bgmac_slot_info *slot = &ring->slots[ring->start];
+               u32 ctl1 = le32_to_cpu(ring->cpu_base[ring->start].ctl1);
+               int len = ctl1 & BGMAC_DESC_CTL1_LEN;
 
-               if (slot->skb) {
+               if (!slot->dma_addr) {
+                       bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
+                                 ring->start, ring->end);
+                       goto next;
+               }
+
+               if (ctl1 & BGMAC_DESC_CTL0_SOF)
                        /* Unmap no longer used buffer */
-                       dma_unmap_single(dma_dev, slot->dma_addr,
-                                        slot->skb->len, DMA_TO_DEVICE);
-                       slot->dma_addr = 0;
+                       dma_unmap_single(dma_dev, slot->dma_addr, len,
+                                        DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(dma_dev, slot->dma_addr, len,
+                                      DMA_TO_DEVICE);
 
+               if (slot->skb) {
                        bytes_compl += slot->skb->len;
                        pkts_compl++;
 
                        /* Free memory! :) */
                        dev_kfree_skb(slot->skb);
                        slot->skb = NULL;
-               } else {
-                       bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
-                                 ring->start, ring->end);
                }
 
+next:
+               slot->dma_addr = 0;
                if (++ring->start >= BGMAC_TX_RING_SLOTS)
                        ring->start = 0;
                freed = true;
        }
 
+       if (!pkts_compl)
+               return;
+
        netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
 
-       if (freed && netif_queue_stopped(bgmac->net_dev))
+       if (netif_queue_stopped(bgmac->net_dev))
                netif_wake_queue(bgmac->net_dev);
 }
 
@@ -276,31 +342,31 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
                                     struct bgmac_slot_info *slot)
 {
        struct device *dma_dev = bgmac->core->dma_dev;
-       struct sk_buff *skb;
        dma_addr_t dma_addr;
        struct bgmac_rx_header *rx;
+       void *buf;
 
        /* Alloc skb */
-       skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
-       if (!skb)
+       buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
+       if (!buf)
                return -ENOMEM;
 
        /* Poison - if everything goes fine, hardware will overwrite it */
-       rx = (struct bgmac_rx_header *)skb->data;
+       rx = buf;
        rx->len = cpu_to_le16(0xdead);
        rx->flags = cpu_to_le16(0xbeef);
 
        /* Map skb for the DMA */
-       dma_addr = dma_map_single(dma_dev, skb->data,
-                                 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+       dma_addr = dma_map_single(dma_dev, buf, BGMAC_RX_BUF_SIZE,
+                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(dma_dev, dma_addr)) {
                bgmac_err(bgmac, "DMA mapping error\n");
-               dev_kfree_skb(skb);
+               put_page(virt_to_head_page(buf));
                return -ENOMEM;
        }
 
        /* Update the slot */
-       slot->skb = skb;
+       slot->buf = buf;
        slot->dma_addr = dma_addr;
 
        return 0;
@@ -343,8 +409,9 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
        while (ring->start != ring->end) {
                struct device *dma_dev = bgmac->core->dma_dev;
                struct bgmac_slot_info *slot = &ring->slots[ring->start];
-               struct sk_buff *skb = slot->skb;
-               struct bgmac_rx_header *rx;
+               struct bgmac_rx_header *rx = slot->buf;
+               struct sk_buff *skb;
+               void *buf = slot->buf;
                u16 len, flags;
 
                /* Unmap buffer to make it accessible to the CPU */
@@ -352,7 +419,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                                        BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
 
                /* Get info from the header */
-               rx = (struct bgmac_rx_header *)skb->data;
                len = le16_to_cpu(rx->len);
                flags = le16_to_cpu(rx->flags);
 
@@ -393,12 +459,13 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                        dma_unmap_single(dma_dev, old_dma_addr,
                                         BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
 
+                       skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
                        skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
                        skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
 
                        skb_checksum_none_assert(skb);
                        skb->protocol = eth_type_trans(skb, bgmac->net_dev);
-                       netif_receive_skb(skb);
+                       napi_gro_receive(&bgmac->napi, skb);
                        handled++;
                } while (0);
 
@@ -434,40 +501,79 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac,
        return false;
 }
 
-static void bgmac_dma_ring_free(struct bgmac *bgmac,
-                               struct bgmac_dma_ring *ring)
+static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
+                                  struct bgmac_dma_ring *ring)
 {
        struct device *dma_dev = bgmac->core->dma_dev;
+       struct bgmac_dma_desc *dma_desc = ring->cpu_base;
        struct bgmac_slot_info *slot;
-       int size;
        int i;
 
        for (i = 0; i < ring->num_slots; i++) {
+               int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+
                slot = &ring->slots[i];
-               if (slot->skb) {
-                       if (slot->dma_addr)
-                               dma_unmap_single(dma_dev, slot->dma_addr,
-                                                slot->skb->len, DMA_TO_DEVICE);
-                       dev_kfree_skb(slot->skb);
-               }
+               dev_kfree_skb(slot->skb);
+
+               if (!slot->dma_addr)
+                       continue;
+
+               if (slot->skb)
+                       dma_unmap_single(dma_dev, slot->dma_addr,
+                                        len, DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(dma_dev, slot->dma_addr,
+                                      len, DMA_TO_DEVICE);
        }
+}
 
-       if (ring->cpu_base) {
-               /* Free ring of descriptors */
-               size = ring->num_slots * sizeof(struct bgmac_dma_desc);
-               dma_free_coherent(dma_dev, size, ring->cpu_base,
-                                 ring->dma_base);
+static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
+                                  struct bgmac_dma_ring *ring)
+{
+       struct device *dma_dev = bgmac->core->dma_dev;
+       struct bgmac_slot_info *slot;
+       int i;
+
+       for (i = 0; i < ring->num_slots; i++) {
+               slot = &ring->slots[i];
+               if (!slot->buf)
+                       continue;
+
+               if (slot->dma_addr)
+                       dma_unmap_single(dma_dev, slot->dma_addr,
+                                        BGMAC_RX_BUF_SIZE,
+                                        DMA_FROM_DEVICE);
+               put_page(virt_to_head_page(slot->buf));
        }
 }
 
+static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
+                                    struct bgmac_dma_ring *ring)
+{
+       struct device *dma_dev = bgmac->core->dma_dev;
+       int size;
+
+       if (!ring->cpu_base)
+           return;
+
+       /* Free ring of descriptors */
+       size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+       dma_free_coherent(dma_dev, size, ring->cpu_base,
+                         ring->dma_base);
+}
+
 static void bgmac_dma_free(struct bgmac *bgmac)
 {
        int i;
 
-       for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
-               bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
-       for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
-               bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
+       for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+               bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
+               bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i]);
+       }
+       for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+               bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
+               bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i]);
+       }
 }
 
 static int bgmac_dma_alloc(struct bgmac *bgmac)
@@ -1551,6 +1657,10 @@ static int bgmac_probe(struct bcma_device *core)
                goto err_dma_free;
        }
 
+       net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       net_dev->hw_features = net_dev->features;
+       net_dev->vlan_features = net_dev->features;
+
        err = register_netdev(bgmac->net_dev);
        if (err) {
                bgmac_err(bgmac, "Cannot register net device\n");
index 89fa5bc69c515f3fd3a4c309558294448fceb386..3ad965fe7fcc8f7e998eafcbe418aa65f171b760 100644 (file)
 
 #define BGMAC_DESC_CTL0_EOT                    0x10000000      /* End of ring */
 #define BGMAC_DESC_CTL0_IOC                    0x20000000      /* IRQ on complete */
-#define BGMAC_DESC_CTL0_SOF                    0x40000000      /* Start of frame */
-#define BGMAC_DESC_CTL0_EOF                    0x80000000      /* End of frame */
+#define BGMAC_DESC_CTL0_EOF                    0x40000000      /* End of frame */
+#define BGMAC_DESC_CTL0_SOF                    0x80000000      /* Start of frame */
 #define BGMAC_DESC_CTL1_LEN                    0x00001FFF
 
 #define BGMAC_PHY_NOREGS                       0x1E
 #define BGMAC_RX_FRAME_OFFSET                  30              /* There are 2 unused bytes between header and real data */
 #define BGMAC_RX_MAX_FRAME_SIZE                        1536            /* Copied from b44/tg3 */
 #define BGMAC_RX_BUF_SIZE                      (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
+#define BGMAC_RX_ALLOC_SIZE                    (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE) + \
+                                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 #define BGMAC_BFL_ENETROBO                     0x0010          /* has ephy roboswitch spi */
 #define BGMAC_BFL_ENETADM                      0x0080          /* has ADMtek switch */
 #define ETHER_MAX_LEN   1518
 
 struct bgmac_slot_info {
-       struct sk_buff *skb;
+       union {
+               struct sk_buff *skb;
+               void *buf;
+       };
        dma_addr_t dma_addr;
 };
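
A note on the two rx sizes above: BGMAC_RX_BUF_SIZE is the most the
hardware may write and is what gets DMA-mapped, while BGMAC_RX_ALLOC_SIZE
additionally reserves the struct skb_shared_info that build_skb() expects
at the tail of the buffer. A kernel-context sketch of the allocation
pattern the rx path now follows (error handling omitted):

     void *buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE); /* data + tail shinfo */
     dma_addr_t dma = dma_map_single(dev, buf, BGMAC_RX_BUF_SIZE,
                                     DMA_FROM_DEVICE);
     /* ... hardware fills the buffer ... */
     struct sk_buff *skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);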
 
index 94603ee742eaea4443d8a3de005a622961d1ce4f..6080f8e7b0cd893c3fc05b288a16e3b383f5a14f 100644 (file)
@@ -2254,7 +2254,7 @@ static int vxlan_stop(struct net_device *dev)
        struct vxlan_sock *vs = vxlan->vn_sock;
        int ret = 0;
 
-       if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+       if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
            !vxlan_group_used(vn, vxlan)) {
                ret = vxlan_igmp_leave(vxlan);
                if (ret)
index 178525e5f430ba99f70a8f8250937501b46a650d..018afb264ac261ea5dea2f3afa547cf9a8f7a202 100644 (file)
@@ -58,8 +58,9 @@ struct af_alg_type {
 };
 
 struct af_alg_sgl {
-       struct scatterlist sg[ALG_MAX_PAGES];
+       struct scatterlist sg[ALG_MAX_PAGES + 1];
        struct page *pages[ALG_MAX_PAGES];
+       unsigned int npages;
 };
 
 int af_alg_register_type(const struct af_alg_type *type);
@@ -70,6 +71,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock);
 
 int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
 void af_alg_free_sg(struct af_alg_sgl *sgl);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
 
 int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
 
index 439ff698000aa4ef81f5cef53e69095ce9c9fc80..221025423e6c993b70918e1186dabb7ff49a00e2 100644 (file)
@@ -43,6 +43,7 @@ enum dccp_state {
        DCCP_CLOSING         = TCP_CLOSING,
        DCCP_TIME_WAIT       = TCP_TIME_WAIT,
        DCCP_CLOSED          = TCP_CLOSE,
+       DCCP_NEW_SYN_RECV    = TCP_NEW_SYN_RECV,
        DCCP_PARTOPEN        = TCP_MAX_STATES,
        DCCP_PASSIVE_CLOSEREQ,                  /* clients receiving CloseReq */
        DCCP_MAX_STATES
@@ -57,6 +58,7 @@ enum {
        DCCPF_CLOSING         = TCPF_CLOSING,
        DCCPF_TIME_WAIT       = TCPF_TIME_WAIT,
        DCCPF_CLOSED          = TCPF_CLOSE,
+       DCCPF_NEW_SYN_RECV    = TCPF_NEW_SYN_RECV,
        DCCPF_PARTOPEN        = (1 << DCCP_PARTOPEN),
 };
 
@@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk)
        return NULL;
 }
 
-extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+extern void dccp_syn_ack_timeout(const struct request_sock *req);
 
 #endif /* _LINUX_DCCP_H */
index bb39113ea5965f07cae1862d0e9c64980307a3e8..2734977199cac6a587126202dff59827724af277 100644 (file)
@@ -19,23 +19,10 @@ enum nf_br_hook_priorities {
 
 #define BRNF_PKT_TYPE                  0x01
 #define BRNF_BRIDGED_DNAT              0x02
-#define BRNF_BRIDGED                   0x04
 #define BRNF_NF_BRIDGE_PREROUTING      0x08
 #define BRNF_8021Q                     0x10
 #define BRNF_PPPoE                     0x20
 
-static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
-{
-       switch (skb->protocol) {
-       case __cpu_to_be16(ETH_P_8021Q):
-               return VLAN_HLEN;
-       case __cpu_to_be16(ETH_P_PPP_SES):
-               return PPPOE_SES_HLEN;
-       default:
-               return 0;
-       }
-}
-
 static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
        if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
@@ -45,21 +32,6 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 
 int br_handle_frame_finish(struct sk_buff *skb);
 
-/* This is called by the IP fragmenting code and it ensures there is
- * enough room for the encapsulating header (if there is one). */
-static inline unsigned int nf_bridge_pad(const struct sk_buff *skb)
-{
-       if (skb->nf_bridge)
-               return nf_bridge_encap_header_len(skb);
-       return 0;
-}
-
-struct bridge_skb_cb {
-       union {
-               __be32 ipv4;
-       } daddr;
-};
-
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
@@ -69,7 +41,6 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
 }
 
 #else
-#define nf_bridge_pad(skb)                     (0)
 #define br_drop_fake_rtable(skb)               do { } while (0)
 #endif /* CONFIG_BRIDGE_NETFILTER */
 
index fab4d0ddf4eda67ff416bb10a1e7b545639462fd..c9852ef7e317a6167675f68e7948b6aebea85947 100644 (file)
@@ -51,6 +51,7 @@ struct msghdr {
        void            *msg_control;   /* ancillary data */
        __kernel_size_t msg_controllen; /* ancillary data buffer length */
        unsigned int    msg_flags;      /* flags on received message */
+       struct kiocb    *msg_iocb;      /* ptr to iocb for async requests */
 };
  
 struct user_msghdr {
index 6a91261d9b7b577c677bd5e02a4a14394e6aaa5b..fe41f3ceb008d767d594de6a042393ba463b509b 100644 (file)
@@ -39,8 +39,7 @@ struct request_sock_ops {
        void            (*send_reset)(struct sock *sk,
                                      struct sk_buff *skb);
        void            (*destructor)(struct request_sock *req);
-       void            (*syn_ack_timeout)(struct sock *sk,
-                                          struct request_sock *req);
+       void            (*syn_ack_timeout)(const struct request_sock *req);
 };
 
 int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
@@ -174,11 +173,6 @@ struct fastopen_queue {
  * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
  * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
  */
 struct request_sock_queue {
        struct request_sock     *rskq_accept_head;
@@ -193,7 +187,7 @@ struct request_sock_queue {
                                             */
 
        /* temporary alignment, our goal is to get rid of this lock */
-       rwlock_t                syn_wait_lock ____cacheline_aligned_in_smp;
+       spinlock_t              syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -224,14 +218,14 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
        struct listen_sock *lopt = queue->listen_opt;
        struct request_sock **prev;
 
-       write_lock(&queue->syn_wait_lock);
+       spin_lock(&queue->syn_wait_lock);
 
        prev = &lopt->syn_table[req->rsk_hash];
        while (*prev != req)
                prev = &(*prev)->dl_next;
        *prev = req->dl_next;
 
-       write_unlock(&queue->syn_wait_lock);
+       spin_unlock(&queue->syn_wait_lock);
        if (del_timer(&req->rsk_timer))
                reqsk_put(req);
 }
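
The prev = &(*prev)->dl_next walk in reqsk_queue_unlink() above is the
classic pointer-to-pointer unlink; restated standalone (hypothetical
types, not kernel code):

     struct node { struct node *next; };

     /* remove victim from a singly linked list without special-casing
      * the head: prev always points at the link to rewrite */
     static void unlink(struct node **head, struct node *victim)
     {
             struct node **prev = head;

             while (*prev != victim)
                     prev = &(*prev)->next;
             *prev = victim->next;
     }
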
index 082fd79132b76dce3ec75bf8dac65f9cf85ada21..fe60e00e191973995ab275b873a694cd5e570717 100644 (file)
@@ -433,7 +433,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen);
 void tcp_set_keepalive(struct sock *sk, int val);
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+void tcp_syn_ack_timeout(const struct request_sock *req);
 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                int flags, int *addr_len);
 void tcp_parse_options(const struct sk_buff *skb,
@@ -447,6 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
+void tcp_req_err(struct sock *sk, u32 seq);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(struct sock *sk,
                                      struct request_sock *req,
index da2d668b8cf1703d167bcea99013385639c14967..053bd102fbe00a0affd7227359e25c7246de9d7e 100644 (file)
@@ -99,6 +99,7 @@ struct tpacket_auxdata {
 #define TP_STATUS_VLAN_VALID           (1 << 4) /* auxdata has valid tp_vlan_tci */
 #define TP_STATUS_BLK_TMO              (1 << 5)
 #define TP_STATUS_VLAN_TPID_VALID      (1 << 6) /* auxdata has valid tp_vlan_tpid */
+#define TP_STATUS_CSUM_VALID           (1 << 7)
 
 /* Tx ring - header status */
 #define TP_STATUS_AVAILABLE          0
index b260a97275db30fbe5a7e301c989e95996d36afb..f3884a1b942f7ae788dc19ff6131f44ea0c809eb 100644 (file)
 #include <net/route.h>
 #include <net/netfilter/br_netfilter.h>
 
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
 #include <asm/uaccess.h>
 #include "br_private.h"
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
 
-#define skb_origaddr(skb)       (((struct bridge_skb_cb *) \
-                                (skb->nf_bridge->data))->daddr.ipv4)
-#define store_orig_dstaddr(skb)         (skb_origaddr(skb) = ip_hdr(skb)->daddr)
-#define dnat_took_place(skb)    (skb_origaddr(skb) != ip_hdr(skb)->daddr)
-
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_header *brnf_sysctl_header;
 static int brnf_call_iptables __read_mostly = 1;
@@ -154,6 +153,18 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
        return nf_bridge;
 }
 
+static unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
+{
+       switch (skb->protocol) {
+       case __cpu_to_be16(ETH_P_8021Q):
+               return VLAN_HLEN;
+       case __cpu_to_be16(ETH_P_PPP_SES):
+               return PPPOE_SES_HLEN;
+       default:
+               return 0;
+       }
+}
+
 static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
 {
        unsigned int len = nf_bridge_encap_header_len(skb);
@@ -322,6 +333,22 @@ free_skb:
        return 0;
 }
 
+static bool dnat_took_place(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct || nf_ct_is_untracked(ct))
+               return false;
+
+       return test_bit(IPS_DST_NAT_BIT, &ct->status);
+#else
+       return false;
+#endif
+}
+
 /* This requires some explaining. If DNAT has taken place,
  * we will need to fix up the destination Ethernet address.
  *
@@ -625,7 +652,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                return NF_DROP;
        if (!setup_pre_routing(skb))
                return NF_DROP;
-       store_orig_dstaddr(skb);
+
        skb->protocol = htons(ETH_P_IP);
 
        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
@@ -721,8 +748,6 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
        if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
                return NF_DROP;
 
-       /* The physdev module checks on this */
-       nf_bridge->mask |= BRNF_BRIDGED;
        nf_bridge->physoutdev = skb->dev;
        if (pf == NFPROTO_IPV4)
                skb->protocol = htons(ETH_P_IP);
@@ -842,7 +867,12 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
        struct net_device *realoutdev = bridge_parent(skb->dev);
        u_int8_t pf;
 
-       if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
+       /* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
+        * on a bridge, but was delivered locally and is now being routed:
+        *
+        * POST_ROUTING was already invoked from the ip stack.
+        */
+       if (!nf_bridge || !nf_bridge->physoutdev)
                return NF_ACCEPT;
 
        if (!realoutdev)
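
With the skb_origaddr()/store_orig_dstaddr()/dnat_took_place() macros removed, br_netfilter now infers DNAT from conntrack state (IPS_DST_NAT_BIT) instead of caching the original destination address alongside the skb. A toy user-space contrast of the two detection schemes; struct pkt and NAT_DONE are made up for the sketch:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NAT_DONE (1u << 0)              /* stand-in for IPS_DST_NAT_BIT */

struct pkt {
        uint32_t daddr;                 /* current destination address */
        uint32_t orig_daddr;            /* old scheme: cached at PRE_ROUTING */
        uint32_t ct_status;             /* new scheme: per-connection state */
};

/* old approach: remember the address, compare later */
static bool dnat_cached(const struct pkt *p)
{
        return p->orig_daddr != p->daddr;
}

/* new approach: ask the connection-tracking state */
static bool dnat_ct(const struct pkt *p)
{
        return p->ct_status & NAT_DONE;
}

int main(void)
{
        struct pkt p = { .daddr = 0x0a000001, .orig_daddr = 0x0a000001 };

        p.daddr = 0x0a000002;           /* simulate a DNAT rewrite */
        p.ct_status |= NAT_DONE;

        printf("cached=%d conntrack=%d\n", dnat_cached(&p), dnat_ct(&p));
        return 0;
}
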
index 13c0c9a25cd99e8e868dc71c23246d45aeaa7c8b..c4b6b0f43d5d4243d8c35274a4669333bd9f5f25 100644 (file)
@@ -79,6 +79,8 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
        if (nr_segs > UIO_MAXIOV)
                return -EMSGSIZE;
 
+       kmsg->msg_iocb = NULL;
+
        err = compat_rw_copy_check_uvector(save_addr ? READ : WRITE,
                                           compat_ptr(uiov), nr_segs,
                                           UIO_FASTIOV, *iov, iov);
index 5d43e010ef870a6ab92895297fe18d6e6a03593a..a0408d497dae04e7caa145f05c915b058aa2d356 100644 (file)
@@ -1696,6 +1696,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        }
 
        skb_scrub_packet(skb, true);
+       skb->priority = 0;
        skb->protocol = eth_type_trans(skb, dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
index cdc0ddd9ac9f7c1768c1d6b7ed30a09ed476137d..87b22c0bc08c2f33fa31948b8b2604f48b8009bc 100644 (file)
@@ -58,14 +58,14 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
                return -ENOMEM;
 
        get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
-       rwlock_init(&queue->syn_wait_lock);
+       spin_lock_init(&queue->syn_wait_lock);
        queue->rskq_accept_head = NULL;
        lopt->nr_table_entries = nr_table_entries;
        lopt->max_qlen_log = ilog2(nr_table_entries);
 
-       write_lock_bh(&queue->syn_wait_lock);
+       spin_lock_bh(&queue->syn_wait_lock);
        queue->listen_opt = lopt;
-       write_unlock_bh(&queue->syn_wait_lock);
+       spin_unlock_bh(&queue->syn_wait_lock);
 
        return 0;
 }
@@ -81,10 +81,10 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk(
 {
        struct listen_sock *lopt;
 
-       write_lock_bh(&queue->syn_wait_lock);
+       spin_lock_bh(&queue->syn_wait_lock);
        lopt = queue->listen_opt;
        queue->listen_opt = NULL;
-       write_unlock_bh(&queue->syn_wait_lock);
+       spin_unlock_bh(&queue->syn_wait_lock);
 
        return lopt;
 }
@@ -100,7 +100,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                for (i = 0; i < lopt->nr_table_entries; i++) {
                        struct request_sock *req;
 
-                       write_lock_bh(&queue->syn_wait_lock);
+                       spin_lock_bh(&queue->syn_wait_lock);
                        while ((req = lopt->syn_table[i]) != NULL) {
                                lopt->syn_table[i] = req->dl_next;
                                atomic_inc(&lopt->qlen_dec);
@@ -108,7 +108,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                                        reqsk_put(req);
                                reqsk_put(req);
                        }
-                       write_unlock_bh(&queue->syn_wait_lock);
+                       spin_unlock_bh(&queue->syn_wait_lock);
                }
        }
 
index 841108b5649f8ceee8fb77656bce0f45eff14c39..119ae464b44a44355e77b7ffb62e56e03acfd875 100644 (file)
@@ -928,8 +928,6 @@ set_rcvbuf:
                        sk->sk_mark = val;
                break;
 
-               /* We implement the SO_SNDLOWAT etc to
-                  not be settable (1003.1g 5.3) */
        case SO_RXQ_OVFL:
                sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
                break;
@@ -1234,6 +1232,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        default:
+               /* We implement the SO_SNDLOWAT etc to not be settable
+                * (1003.1g 7).
+                */
                return -ENOPROTOOPT;
        }
 
index 2396f50c5b044095b9a6c4b85ed74b3ce44f1f3d..bebc735f5afc0fd9993a2a6ddc4074dcaa5b1559 100644 (file)
@@ -317,6 +317,7 @@ int inet_dccp_listen(struct socket *sock, int backlog);
 unsigned int dccp_poll(struct file *file, struct socket *sock,
                       poll_table *wait);
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void dccp_req_err(struct sock *sk, u64 seq);
 
 struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
 int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
index 25a9615b3b88993208a1e73f312103646c2d557f..2b4f21d34df6819c134b590d8ddeecffe668aaf6 100644 (file)
@@ -195,6 +195,32 @@ static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
                dst->ops->redirect(dst, sk, skb);
 }
 
+void dccp_req_err(struct sock *sk, u64 seq)
+{
+       struct request_sock *req = inet_reqsk(sk);
+       struct net *net = sock_net(sk);
+
+       /*
+        * ICMPs are not backlogged, hence we cannot get an established
+        * socket here.
+        */
+       WARN_ON(req->sk);
+
+       if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
+               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               reqsk_put(req);
+       } else {
+               /*
+                * Still in RESPOND, just remove it silently.
+                * There is no good way to pass the error to the newly
+                * created socket, and POSIX does not want network
+                * errors returned from accept().
+                */
+               inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+       }
+}
+EXPORT_SYMBOL(dccp_req_err);
+
 /*
  * This routine is called by the ICMP module when it gets some sort of error
  * condition. If err < 0 then the socket should be closed and the error
@@ -227,10 +253,11 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
                return;
        }
 
-       sk = inet_lookup(net, &dccp_hashinfo,
-                       iph->daddr, dh->dccph_dport,
-                       iph->saddr, dh->dccph_sport, inet_iif(skb));
-       if (sk == NULL) {
+       sk = __inet_lookup_established(net, &dccp_hashinfo,
+                                      iph->daddr, dh->dccph_dport,
+                                      iph->saddr, ntohs(dh->dccph_sport),
+                                      inet_iif(skb));
+       if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
@@ -239,6 +266,9 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
                inet_twsk_put(inet_twsk(sk));
                return;
        }
+       seq = dccp_hdr_seq(dh);
+       if (sk->sk_state == DCCP_NEW_SYN_RECV)
+               return dccp_req_err(sk, seq);
 
        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
@@ -251,7 +281,6 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
                goto out;
 
        dp = dccp_sk(sk);
-       seq = dccp_hdr_seq(dh);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
@@ -288,37 +317,6 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
        }
 
        switch (sk->sk_state) {
-               struct request_sock *req;
-       case DCCP_LISTEN:
-               if (sock_owned_by_user(sk))
-                       goto out;
-               req = inet_csk_search_req(sk, dh->dccph_dport,
-                                         iph->daddr, iph->saddr);
-               if (!req)
-                       goto out;
-
-               /*
-                * ICMPs are not backlogged, hence we cannot get an established
-                * socket here.
-                */
-               WARN_ON(req->sk);
-
-               if (!between48(seq, dccp_rsk(req)->dreq_iss,
-                                   dccp_rsk(req)->dreq_gss)) {
-                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                       reqsk_put(req);
-                       goto out;
-               }
-               /*
-                * Still in RESPOND, just remove it silently.
-                * There is no good way to pass the error to the newly
-                * created socket, and POSIX does not want network
-                * errors returned from accept().
-                */
-               inet_csk_reqsk_queue_drop(sk, req);
-               reqsk_put(req);
-               goto out;
-
        case DCCP_REQUESTING:
        case DCCP_RESPOND:
                if (!sock_owned_by_user(sk)) {
@@ -576,7 +574,7 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req)
        kfree(inet_rsk(req)->opt);
 }
 
-void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+void dccp_syn_ack_timeout(const struct request_sock *req)
 {
 }
 EXPORT_SYMBOL(dccp_syn_ack_timeout);
index 69d8f13895bac406a275ecd6ece4334c8d1ebb95..9d0551092c6cd73f3cfa30c89130bac69d693118 100644 (file)
@@ -85,11 +85,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                return;
        }
 
-       sk = inet6_lookup(net, &dccp_hashinfo,
-                       &hdr->daddr, dh->dccph_dport,
-                       &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
+       sk = __inet6_lookup_established(net, &dccp_hashinfo,
+                                       &hdr->daddr, dh->dccph_dport,
+                                       &hdr->saddr, ntohs(dh->dccph_sport),
+                                       inet6_iif(skb));
 
-       if (sk == NULL) {
+       if (!sk) {
                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                   ICMP6_MIB_INERRORS);
                return;
@@ -99,6 +100,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                inet_twsk_put(inet_twsk(sk));
                return;
        }
+       seq = dccp_hdr_seq(dh);
+       if (sk->sk_state == DCCP_NEW_SYN_RECV)
+               return dccp_req_err(sk, seq);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
@@ -108,7 +112,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                goto out;
 
        dp = dccp_sk(sk);
-       seq = dccp_hdr_seq(dh);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
@@ -149,34 +152,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 /* Might be for a request_sock */
        switch (sk->sk_state) {
-               struct request_sock *req;
-       case DCCP_LISTEN:
-               if (sock_owned_by_user(sk))
-                       goto out;
-
-               req = inet6_csk_search_req(sk, dh->dccph_dport,
-                                          &hdr->daddr, &hdr->saddr,
-                                          inet6_iif(skb));
-               if (!req)
-                       goto out;
-
-               /*
-                * ICMPs are not backlogged, hence we cannot get an established
-                * socket here.
-                */
-               WARN_ON(req->sk != NULL);
-
-               if (!between48(seq, dccp_rsk(req)->dreq_iss,
-                                   dccp_rsk(req)->dreq_gss)) {
-                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                       reqsk_put(req);
-                       goto out;
-               }
-
-               inet_csk_reqsk_queue_drop(sk, req);
-               reqsk_put(req);
-               goto out;
-
        case DCCP_REQUESTING:
        case DCCP_RESPOND:  /* Cannot happen.
                               It can, if SYNs are crossed. --ANK */
index e3b4aee4244e1702b9ad34ffc788a5268447e8e5..2c7c299ee2b923e8a7d5394f49868d305dd5df74 100644 (file)
@@ -830,7 +830,7 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn)
        /* Double as long as the resulting node has a number of
         * nonempty nodes that are above the threshold.
         */
-       while (should_inflate(tp, tn) && max_work--) {
+       while (should_inflate(tp, tn) && max_work) {
                tp = inflate(t, tn);
                if (!tp) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -839,17 +839,21 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn)
                        break;
                }
 
+               max_work--;
                tn = get_child(tp, cindex);
        }
 
+       /* update parent in case inflate failed */
+       tp = node_parent(tn);
+
        /* Return if at least one inflate was run */
        if (max_work != MAX_WORK)
-               return node_parent(tn);
+               return tp;
 
        /* Halve as long as the number of empty children in this
         * node is above threshold.
         */
-       while (should_halve(tp, tn) && max_work--) {
+       while (should_halve(tp, tn) && max_work) {
                tp = halve(t, tn);
                if (!tp) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -858,6 +862,7 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn)
                        break;
                }
 
+               max_work--;
                tn = get_child(tp, cindex);
        }
 
@@ -865,7 +870,7 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn)
        if (should_collapse(tn))
                return collapse(t, tn);
 
-       /* update parent in case inflate or halve failed */
+       /* update parent in case halve failed */
        tp = node_parent(tn);
 
        /* Return if at least one deflate was run */
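
The resize() change moves the max_work decrement from the loop condition into the body: the postfix form while (cond && max_work--) charges the budget for the final, failing test as well, leaving the counter one lower than the number of iterations that actually ran, which matters because max_work != MAX_WORK is later used to decide whether any work happened. A small stand-alone demonstration of the difference; the MAX_WORK value is illustrative:

#include <stdio.h>

#define MAX_WORK 2

int main(void)
{
        int budget, iters;

        /* decrement in the condition: charged even when the test fails */
        budget = MAX_WORK;
        iters = 0;
        while (budget--)
                iters++;
        printf("in-condition: iters=%d budget=%d\n", iters, budget); /* 2, -1 */

        /* decrement in the body: budget reflects iterations exactly */
        budget = MAX_WORK;
        iters = 0;
        while (budget) {
                budget--;
                iters++;
        }
        printf("in-body:      iters=%d budget=%d\n", iters, budget); /* 2, 0 */
        return 0;
}
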
index 844808d9337bccb3621585c513fd6d4a3b9302e4..79c0c9439fdc7dd0b68421a6b229c869f37f7a01 100644 (file)
@@ -403,18 +403,17 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
                                     struct flowi4 *fl4,
                                     const struct request_sock *req)
 {
-       struct rtable *rt;
        const struct inet_request_sock *ireq = inet_rsk(req);
-       struct ip_options_rcu *opt = inet_rsk(req)->opt;
-       struct net *net = sock_net(sk);
-       int flags = inet_sk_flowi_flags(sk);
+       struct net *net = read_pnet(&ireq->ireq_net);
+       struct ip_options_rcu *opt = ireq->opt;
+       struct rtable *rt;
 
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
+       flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-                          sk->sk_protocol,
-                          flags,
+                          sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
-                          ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
+                          ireq->ir_loc_addr, ireq->ir_rmt_port,
+                          htons(ireq->ir_num));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -436,9 +435,9 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
                                            const struct request_sock *req)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
+       struct net *net = read_pnet(&ireq->ireq_net);
        struct inet_sock *newinet = inet_sk(newsk);
        struct ip_options_rcu *opt;
-       struct net *net = sock_net(sk);
        struct flowi4 *fl4;
        struct rtable *rt;
 
@@ -446,11 +445,12 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 
        rcu_read_lock();
        opt = rcu_dereference(newinet->inet_opt);
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
+       flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
-                          ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
+                          ireq->ir_loc_addr, ireq->ir_rmt_port,
+                          htons(ireq->ir_num));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -495,7 +495,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
        u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
                                  lopt->nr_table_entries);
 
-       write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
        for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -508,7 +508,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
                        break;
                }
        }
-       write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return req;
 }
@@ -571,8 +571,9 @@ static void reqsk_timer_handler(unsigned long data)
        struct inet_connection_sock *icsk = inet_csk(sk_listener);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct listen_sock *lopt = queue->listen_opt;
-       int expire = 0, resend = 0;
+       int qlen, expire = 0, resend = 0;
        int max_retries, thresh;
+       u8 defer_accept;
 
        if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
                reqsk_put(req);
@@ -598,21 +599,23 @@ static void reqsk_timer_handler(unsigned long data)
         * embryos; and abort old ones without pity, if old
         * ones are about to clog our table.
         */
-       if (listen_sock_qlen(lopt) >> (lopt->max_qlen_log - 1)) {
+       qlen = listen_sock_qlen(lopt);
+       if (qlen >> (lopt->max_qlen_log - 1)) {
                int young = listen_sock_young(lopt) << 1;
 
                while (thresh > 2) {
-                       if (listen_sock_qlen(lopt) < young)
+                       if (qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }
-       if (queue->rskq_defer_accept)
-               max_retries = queue->rskq_defer_accept;
-       syn_ack_recalc(req, thresh, max_retries, queue->rskq_defer_accept,
+       defer_accept = READ_ONCE(queue->rskq_defer_accept);
+       if (defer_accept)
+               max_retries = defer_accept;
+       syn_ack_recalc(req, thresh, max_retries, defer_accept,
                       &expire, &resend);
-       req->rsk_ops->syn_ack_timeout(sk_listener, req);
+       req->rsk_ops->syn_ack_timeout(req);
        if (!expire &&
            (!resend ||
             !inet_rtx_syn_ack(sk_listener, req) ||
@@ -647,10 +650,10 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
        setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
        req->rsk_hash = hash;
 
-       write_lock(&queue->syn_wait_lock);
+       spin_lock(&queue->syn_wait_lock);
        req->dl_next = lopt->syn_table[hash];
        lopt->syn_table[hash] = req;
-       write_unlock(&queue->syn_wait_lock);
+       spin_unlock(&queue->syn_wait_lock);
 
        mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
 }
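
reqsk_timer_handler() above now samples listen_sock_qlen() and rskq_defer_accept once into locals, the latter through READ_ONCE(), so a single timer run cannot observe two different values if the listener changes the sockopt concurrently. A user-space sketch of the snapshot idiom; the READ_ONCE macro below is a simplified stand-in for the kernel's and assumes GNU C __typeof__:

#include <stdio.h>

/* simplified stand-in for the kernel's READ_ONCE() */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned char shared_defer_accept;       /* set from another context */

static void timer_handler(void)
{
        /* snapshot once; every later use sees the same value */
        unsigned char defer_accept = READ_ONCE(shared_defer_accept);
        int max_retries = 5;            /* illustrative default */

        if (defer_accept)
                max_retries = defer_accept;
        printf("defer_accept=%d max_retries=%d\n", defer_accept, max_retries);
}

int main(void)
{
        shared_defer_accept = 3;
        timer_handler();
        return 0;
}
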
index f984b2001d0acf5191f59b56157a0c29f0db1316..76322c9867d5eb1ffe7808c908e42208046888a7 100644 (file)
@@ -728,7 +728,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
        entry.family = sk->sk_family;
 
-       read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
        lopt = icsk->icsk_accept_queue.listen_opt;
        if (!lopt || !listen_sock_qlen(lopt))
@@ -776,7 +776,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
        }
 
 out:
-       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return err;
 }
index a7aea2048a0d7a624ceb79923d25e9750ec6fa9a..90b49e88e84a63d0b46fc01c2ded1e3dddd61fcb 100644 (file)
@@ -636,10 +636,7 @@ slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;             /* Where to start from */
 
-       /* for bridged IP traffic encapsulated inside f.e. a vlan header,
-        * we need to make room for the encapsulating header
-        */
-       ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
+       ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
 
        /*
         *      Fragment the datagram.
index a460a87e14f890437a65a2434f6c6ea9fcb56c9d..f0dfe92a00d66a6a58301a94b238baa5cae32fb7 100644 (file)
@@ -300,7 +300,9 @@ static int exp_seq_show(struct seq_file *s, void *v)
                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
                                         exp->tuple.dst.protonum));
-       return seq_putc(s, '\n');
+       seq_putc(s, '\n');
+
+       return 0;
 }
 
 static const struct seq_operations exp_seq_ops = {
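
This and the similar seq_printf()/seq_putc() hunks below stop propagating the print helpers' return values: the seq_file core tracks overflow internally and retries with a larger buffer, so a ->show() callback should simply return 0 on success. A rough user-space analogue of that contract; struct toy_seq and its helpers are invented for the sketch:

#include <stdio.h>
#include <string.h>

/* toy stand-in for the kernel's struct seq_file */
struct toy_seq {
        char buf[64];
        size_t len;
        int overflow;           /* tracked internally, cf. seq_has_overflowed() */
};

static void toy_seq_puts(struct toy_seq *s, const char *str)
{
        size_t n = strlen(str);

        if (s->len + n >= sizeof(s->buf)) {
                s->overflow = 1;        /* the core retries with more room */
                return;
        }
        memcpy(s->buf + s->len, str, n);
        s->len += n;
}

/* a ->show()-style callback: emit one record, return 0 on success */
static int show_record(struct toy_seq *s)
{
        toy_seq_puts(s, "packets=1 bytes=64");
        toy_seq_puts(s, "\n");
        return 0;               /* not the print helpers' return values */
}

int main(void)
{
        struct toy_seq s = { .len = 0 };

        if (show_record(&s) == 0 && !s.overflow)
                fwrite(s.buf, 1, s.len, stdout);
        return 0;
}
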
index 5554b8f33d41b43dc4ccf3b95322e362bbe3844a..4e90217003e83f67da99317f7a3aa6a6c2d99b3e 100644 (file)
@@ -310,6 +310,34 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
                dst->ops->redirect(dst, sk, skb);
 }
 
+
+/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
+void tcp_req_err(struct sock *sk, u32 seq)
+{
+       struct request_sock *req = inet_reqsk(sk);
+       struct net *net = sock_net(sk);
+
+       /* ICMPs are not backlogged, hence we cannot get
+        * an established socket here.
+        */
+       WARN_ON(req->sk);
+
+       if (seq != tcp_rsk(req)->snt_isn) {
+               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               reqsk_put(req);
+       } else {
+               /*
+                * Still in SYN_RECV, just remove it silently.
+                * There is no good way to pass the error to the newly
+                * created socket, and POSIX does not want network
+                * errors returned from accept().
+                */
+               NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
+               inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+       }
+}
+EXPORT_SYMBOL(tcp_req_err);
+
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -343,8 +371,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        int err;
        struct net *net = dev_net(icmp_skb->dev);
 
-       sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
-                       iph->saddr, th->source, inet_iif(icmp_skb));
+       sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
+                                      th->dest, iph->saddr, ntohs(th->source),
+                                      inet_iif(icmp_skb));
        if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
@@ -353,6 +382,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                inet_twsk_put(inet_twsk(sk));
                return;
        }
+       seq = ntohl(th->seq);
+       if (sk->sk_state == TCP_NEW_SYN_RECV)
+               return tcp_req_err(sk, seq);
 
        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
@@ -374,7 +406,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
-       seq = ntohl(th->seq);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -458,38 +489,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        }
 
        switch (sk->sk_state) {
-               struct request_sock *req;
-       case TCP_LISTEN:
-               if (sock_owned_by_user(sk))
-                       goto out;
-
-               req = inet_csk_search_req(sk, th->dest,
-                                         iph->daddr, iph->saddr);
-               if (!req)
-                       goto out;
-
-               /* ICMPs are not backlogged, hence we cannot get
-                  an established socket here.
-                */
-               WARN_ON(req->sk);
-
-               if (seq != tcp_rsk(req)->snt_isn) {
-                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                       reqsk_put(req);
-                       goto out;
-               }
-
-               /*
-                * Still in SYN_RECV, just remove it silently.
-                * There is no good way to pass the error to the newly
-                * created socket, and POSIX does not want network
-                * errors returned from accept().
-                */
-               inet_csk_reqsk_queue_drop(sk, req);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
-               reqsk_put(req);
-               goto out;
-
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket is
@@ -1909,13 +1908,13 @@ get_req:
                }
                sk        = sk_nulls_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        } else {
                icsk = inet_csk(sk);
-               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue))
                        goto start_req;
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                sk = sk_nulls_next(sk);
        }
 get_sk:
@@ -1927,7 +1926,7 @@ get_sk:
                        goto out;
                }
                icsk = inet_csk(sk);
-               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
                        st->uid         = sock_i_uid(sk);
@@ -1936,7 +1935,7 @@ start_req:
                        st->sbucket     = 0;
                        goto get_req;
                }
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        spin_unlock_bh(&ilb->lock);
        st->offset = 0;
@@ -2155,7 +2154,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
                        struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
-                       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                       spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
index 3daa6b5d766d6bdcc2484178cccd7847e54eb3f1..2568fd282873b7436ca2299e20c283e1affd8688 100644 (file)
@@ -327,7 +327,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
        struct request_sock *req;
 
        req = tcp_sk(sk)->fastopen_rsk;
-       req->rsk_ops->syn_ack_timeout(sk, req);
+       req->rsk_ops->syn_ack_timeout(req);
 
        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
@@ -539,9 +539,11 @@ static void tcp_write_timer(unsigned long data)
        sock_put(sk);
 }
 
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+void tcp_syn_ack_timeout(const struct request_sock *req)
 {
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
+       struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
+
+       NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
 }
 EXPORT_SYMBOL(tcp_syn_ack_timeout);
 
index 2f3bbe569e8f751b2229305eefce9d2110d1f8c7..6927f3fb5597fd2013b885cddb35bed852b950d5 100644 (file)
@@ -124,7 +124,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk,
        u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
                                   lopt->nr_table_entries);
 
-       write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
        for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -138,7 +138,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk,
                        break;
                }
        }
-       write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return req;
 }
index 544b0a9da1b59db2fc59cbaf11a1eccd1de8dd3f..12331efd49cf865b2e3ce934a734af47f77928a1 100644 (file)
@@ -83,7 +83,8 @@ static int reject_tg6_check(const struct xt_tgchk_param *par)
                return -EINVAL;
        } else if (rejinfo->with == IP6T_TCP_RESET) {
                /* Must specify that it's a TCP packet */
-               if (e->ipv6.proto != IPPROTO_TCP ||
+               if (!(e->ipv6.flags & IP6T_F_PROTO) ||
+                   e->ipv6.proto != IPPROTO_TCP ||
                    (e->ipv6.invflags & XT_INV_PROTO)) {
                        pr_info("TCP_RESET illegal for non-tcp\n");
                        return -EINVAL;
index 6e3f90db038cb001dad5c4dddef88d93ecdbf5f3..4a4e6d30c448b1ee8ec4948e025ed0a19d505553 100644 (file)
@@ -324,18 +324,20 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 {
        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+       struct net *net = dev_net(skb->dev);
+       struct request_sock *fastopen;
        struct ipv6_pinfo *np;
-       struct sock *sk;
-       int err;
        struct tcp_sock *tp;
-       struct request_sock *fastopen;
        __u32 seq, snd_una;
-       struct net *net = dev_net(skb->dev);
+       struct sock *sk;
+       int err;
 
-       sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
-                       th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+       sk = __inet6_lookup_established(net, &tcp_hashinfo,
+                                       &hdr->daddr, th->dest,
+                                       &hdr->saddr, ntohs(th->source),
+                                       skb->dev->ifindex);
 
-       if (sk == NULL) {
+       if (!sk) {
                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                   ICMP6_MIB_INERRORS);
                return;
@@ -345,6 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                inet_twsk_put(inet_twsk(sk));
                return;
        }
+       seq = ntohl(th->seq);
+       if (sk->sk_state == TCP_NEW_SYN_RECV)
+               return tcp_req_err(sk, seq);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -359,7 +364,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        }
 
        tp = tcp_sk(sk);
-       seq = ntohl(th->seq);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -403,33 +407,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        /* Might be for a request_sock */
        switch (sk->sk_state) {
-               struct request_sock *req;
-       case TCP_LISTEN:
-               if (sock_owned_by_user(sk))
-                       goto out;
-
-               /* Note : We use inet6_iif() here, not tcp_v6_iif() */
-               req = inet6_csk_search_req(sk, th->dest, &hdr->daddr,
-                                          &hdr->saddr, inet6_iif(skb));
-               if (!req)
-                       goto out;
-
-               /* ICMPs are not backlogged, hence we cannot get
-                * an established socket here.
-                */
-               WARN_ON(req->sk != NULL);
-
-               if (seq != tcp_rsk(req)->snt_isn) {
-                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                       reqsk_put(req);
-                       goto out;
-               }
-
-               inet_csk_reqsk_queue_drop(sk, req);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
-               reqsk_put(req);
-               goto out;
-
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket is
index a4b5e2a435acb4c2fafaf26ccb4fce349a151f9a..45da11afa785b2779857b3786881158c821263b3 100644 (file)
@@ -47,9 +47,11 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
                return 0;
 
        counter = acct->counter;
-       return seq_printf(s, "packets=%llu bytes=%llu ",
-                         (unsigned long long)atomic64_read(&counter[dir].packets),
-                         (unsigned long long)atomic64_read(&counter[dir].bytes));
+       seq_printf(s, "packets=%llu bytes=%llu ",
+                  (unsigned long long)atomic64_read(&counter[dir].packets),
+                  (unsigned long long)atomic64_read(&counter[dir].bytes));
+
+       return 0;
 };
 EXPORT_SYMBOL_GPL(seq_print_acct);
 
index 91a1837acd0e8fb981ccea73ae262197afecfb33..7a17070c5dabb979c2cb90b7b62f35a79cdc92c0 100644 (file)
@@ -561,7 +561,9 @@ static int exp_seq_show(struct seq_file *s, void *v)
                                   helper->expect_policy[expect->class].name);
        }
 
-       return seq_putc(s, '\n');
+       seq_putc(s, '\n');
+
+       return 0;
 }
 
 static const struct seq_operations exp_seq_ops = {
index ea51833c8f5a3eb2ec625a241f66f37be6ddb812..f7e3371ce856a0a51a0fd4ee106e3602ea0190f9 100644 (file)
@@ -687,11 +687,10 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        if (!try_module_get(afi->owner))
                return -EAFNOSUPPORT;
 
+       err = -ENOMEM;
        table = kzalloc(sizeof(*table), GFP_KERNEL);
-       if (table == NULL) {
-               module_put(afi->owner);
-               return -ENOMEM;
-       }
+       if (table == NULL)
+               goto err1;
 
        nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN);
        INIT_LIST_HEAD(&table->chains);
@@ -700,13 +699,16 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 
        nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
        err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
-       if (err < 0) {
-               kfree(table);
-               module_put(afi->owner);
-               return err;
-       }
+       if (err < 0)
+               goto err2;
+
        list_add_tail_rcu(&table->list, &afi->tables);
        return 0;
+err2:
+       kfree(table);
+err1:
+       module_put(afi->owner);
+       return err;
 }
 
 static int nft_flush_table(struct nft_ctx *ctx)
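
The nf_tables_newtable() rewrite above is the usual kernel goto-unwind shape: take resources in order, release them in reverse through numbered labels, and keep a single error return. A self-contained sketch of the pattern with invented names; make_widget stands in for the table setup and the fail flag for nft_trans_table_add() failing:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct widget { int id; };

static int module_refs;

static int get_module(void)  { module_refs++; return 0; }
static void put_module(void) { module_refs--; }

static int make_widget(struct widget **out, int fail)
{
        struct widget *w;
        int err;

        if (get_module())
                return -EAFNOSUPPORT;

        err = -ENOMEM;
        w = calloc(1, sizeof(*w));
        if (!w)
                goto err1;

        err = fail ? -EINVAL : 0;       /* stands in for nft_trans_table_add() */
        if (err < 0)
                goto err2;

        w->id = 42;
        *out = w;
        return 0;

err2:
        free(w);
err1:
        put_module();
        return err;
}

int main(void)
{
        struct widget *w = NULL;

        printf("fail path: %d (module refs: %d)\n", make_widget(&w, 1), module_refs);
        printf("ok path:   %d (module refs: %d)\n", make_widget(&w, 0), module_refs);
        free(w);
        return 0;
}
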
@@ -3136,6 +3138,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
                if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
                        return -EINVAL;
+               if (!(set->flags & NFT_SET_INTERVAL) &&
+                   elem.flags & NFT_SET_ELEM_INTERVAL_END)
+                       return -EINVAL;
        }
 
        if (set->flags & NFT_SET_MAP) {
index 61d04bf9be2b8ea994ddee984351c5d80c958504..957b83a0223b8eef159b572a2b685095a2d3e0ab 100644 (file)
@@ -998,11 +998,13 @@ static int seq_show(struct seq_file *s, void *v)
 {
        const struct nfulnl_instance *inst = v;
 
-       return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
-                         inst->group_num,
-                         inst->peer_portid, inst->qlen,
-                         inst->copy_mode, inst->copy_range,
-                         inst->flushtimeout, atomic_read(&inst->use));
+       seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
+                  inst->group_num,
+                  inst->peer_portid, inst->qlen,
+                  inst->copy_mode, inst->copy_range,
+                  inst->flushtimeout, atomic_read(&inst->use));
+
+       return 0;
 }
 
 static const struct seq_operations nful_seq_ops = {
index 46214f245665a0f70138cbd025bb9d1917a7e32f..2c75361077f7e5b903b851f2df09873892f37f7e 100644 (file)
@@ -37,10 +37,11 @@ static bool nft_rbtree_lookup(const struct nft_set *set,
 {
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct nft_rbtree_elem *rbe, *interval = NULL;
-       const struct rb_node *parent = priv->root.rb_node;
+       const struct rb_node *parent;
        int d;
 
        spin_lock_bh(&nft_rbtree_lock);
+       parent = priv->root.rb_node;
        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
@@ -158,7 +159,6 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
        struct nft_rbtree_elem *rbe;
        int d;
 
-       spin_lock_bh(&nft_rbtree_lock);
        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
@@ -173,11 +173,9 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
                            !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
                                nft_data_copy(&elem->data, rbe->data);
                        elem->flags = rbe->flags;
-                       spin_unlock_bh(&nft_rbtree_lock);
                        return 0;
                }
        }
-       spin_unlock_bh(&nft_rbtree_lock);
        return -ENOENT;
 }
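
The nft_rbtree_lookup() hunk delays reading priv->root.rb_node until nft_rbtree_lock is held; sampling a shared root pointer before entering the critical section can hand the walker a node that a concurrent writer has already re-rooted or freed. A pthread sketch of the load-under-lock rule, with illustrative types:

#include <pthread.h>
#include <stdio.h>

struct tnode {
        int key;
        struct tnode *left, *right;
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tnode *tree_root;         /* writers mutate it under tree_lock */

static int tree_contains(int key)
{
        struct tnode *n;
        int found = 0;

        pthread_mutex_lock(&tree_lock);
        n = tree_root;                  /* load the root inside the lock */
        while (n) {
                if (key == n->key) {
                        found = 1;
                        break;
                }
                n = key < n->key ? n->left : n->right;
        }
        pthread_mutex_unlock(&tree_lock);
        return found;
}

int main(void)
{
        static struct tnode leaf = { .key = 7 };

        pthread_mutex_lock(&tree_lock);
        tree_root = &leaf;
        pthread_mutex_unlock(&tree_lock);
        printf("contains 7: %d\n", tree_contains(7));
        return 0;
}
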
 
index f440f57a452fd9650d73f0a13b644383793463f5..50a52043650fd95989eb6618a36cbb8dba0f6b18 100644 (file)
@@ -56,8 +56,7 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
        /* This only makes sense in the FORWARD and POSTROUTING chains */
        if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
-           (!!(nf_bridge->mask & BRNF_BRIDGED) ^
-           !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
+           (!!nf_bridge->physoutdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
                return false;
 
        if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
index b91ac5946ad1a4b88123c63dc33106ade6ce5bd3..5102c3cc4eec4ecec6698859935d7769d37a174c 100644 (file)
@@ -1916,14 +1916,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               status |= TP_STATUS_CSUMNOTREADY;
-
        snaplen = skb->len;
 
        res = run_filter(skb, sk, snaplen);
        if (!res)
                goto drop_n_restore;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               status |= TP_STATUS_CSUMNOTREADY;
+       else if (skb->pkt_type != PACKET_OUTGOING &&
+                (skb->ip_summed == CHECKSUM_COMPLETE ||
+                 skb_csum_unnecessary(skb)))
+               status |= TP_STATUS_CSUM_VALID;
+
        if (snaplen > res)
                snaplen = res;
 
@@ -3030,6 +3035,11 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                aux.tp_status = TP_STATUS_USER;
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        aux.tp_status |= TP_STATUS_CSUMNOTREADY;
+               else if (skb->pkt_type != PACKET_OUTGOING &&
+                        (skb->ip_summed == CHECKSUM_COMPLETE ||
+                         skb_csum_unnecessary(skb)))
+                       aux.tp_status |= TP_STATUS_CSUM_VALID;
+
                aux.tp_len = origlen;
                aux.tp_snaplen = skb->len;
                aux.tp_mac = 0;
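
User space can observe the new TP_STATUS_CSUM_VALID bit through PACKET_AUXDATA control messages: it means at least the transport checksum was already verified in the kernel, and it is only meaningful when TP_STATUS_CSUMNOTREADY is clear. A hedged receive-loop example (requires CAP_NET_RAW; the fallback #define covers uapi headers older than this patch):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

#ifndef TP_STATUS_CSUM_VALID
#define TP_STATUS_CSUM_VALID (1 << 7)   /* added by this series */
#endif

int main(void)
{
        char pkt[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
        int one = 1;
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

        if (fd < 0 ||
            setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one)) < 0) {
                perror("socket/setsockopt");
                return 1;
        }
        for (;;) {
                struct iovec iov = { .iov_base = pkt, .iov_len = sizeof(pkt) };
                struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                                      .msg_control = cbuf,
                                      .msg_controllen = sizeof(cbuf) };
                struct cmsghdr *cm;

                if (recvmsg(fd, &msg, 0) < 0)
                        break;
                for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                        struct tpacket_auxdata aux;

                        if (cm->cmsg_level != SOL_PACKET ||
                            cm->cmsg_type != PACKET_AUXDATA)
                                continue;
                        memcpy(&aux, CMSG_DATA(cm), sizeof(aux));
                        if (aux.tp_status & TP_STATUS_CSUMNOTREADY)
                                puts("csum not ready (partial)");
                        else if (aux.tp_status & TP_STATUS_CSUM_VALID)
                                puts("csum already validated by the kernel");
                        else
                                puts("csum must be checked in user space");
                }
        }
        return 0;
}
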
index 3e776776f42c1015f0e989e8302dcd519ca6dc1c..073809f4125f276799418342f9c60519a3da82e9 100644 (file)
@@ -798,7 +798,8 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct file *file = iocb->ki_filp;
        struct socket *sock = file->private_data;
-       struct msghdr msg = {.msg_iter = *to};
+       struct msghdr msg = {.msg_iter = *to,
+                            .msg_iocb = iocb};
        ssize_t res;
 
        if (file->f_flags & O_NONBLOCK)
@@ -819,7 +820,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct socket *sock = file->private_data;
-       struct msghdr msg = {.msg_iter = *from};
+       struct msghdr msg = {.msg_iter = *from,
+                            .msg_iocb = iocb};
        ssize_t res;
 
        if (iocb->ki_pos != 0)
@@ -1894,6 +1896,8 @@ static ssize_t copy_msghdr_from_user(struct msghdr *kmsg,
        if (nr_segs > UIO_MAXIOV)
                return -EMSGSIZE;
 
+       kmsg->msg_iocb = NULL;
+
        err = rw_copy_check_uvector(save_addr ? READ : WRITE,
                                    uiov, nr_segs,
                                    UIO_FASTIOV, *iov, iov);
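
The msghdr hunks rely on two C facts: a designated initializer, as in sock_read_iter()/sock_write_iter(), zeroes every member it does not name, while a struct filled field by field, as in copy_msghdr_from_user() and get_compat_msghdr(), leaves unnamed members indeterminate, hence the explicit msg_iocb = NULL there. A short demonstration with a toy struct:

#include <stdio.h>

struct toy_msghdr {
        void *msg_name;
        int msg_flags;
        void *msg_iocb;
};

static void fill_by_hand(struct toy_msghdr *m)
{
        /* like copy_msghdr_from_user(): fields are assigned one by one,
         * so anything not assigned must be cleared explicitly
         */
        m->msg_name = NULL;
        m->msg_flags = 0;
        m->msg_iocb = NULL;     /* the fix: without this, stack garbage */
}

int main(void)
{
        /* designated initializer: unnamed members are zeroed (C99 6.7.8) */
        struct toy_msghdr a = { .msg_flags = 1 };
        struct toy_msghdr b;

        fill_by_hand(&b);
        printf("a.msg_iocb=%p b.msg_iocb=%p\n", a.msg_iocb, b.msg_iocb);
        return 0;
}
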
index c9bfa004abed1b7d5a47b9bf509eabce32ad956c..46568b85c3339f57a0d6835e82eff4b67c3ba326 100644 (file)
@@ -47,11 +47,20 @@ EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get);
 int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
 {
        const struct swdev_ops *ops = dev->swdev_ops;
+       struct net_device *lower_dev;
+       struct list_head *iter;
+       int err = -EOPNOTSUPP;
 
-       if (!ops || !ops->swdev_port_stp_update)
-               return -EOPNOTSUPP;
-       WARN_ON(!ops->swdev_parent_id_get);
-       return ops->swdev_port_stp_update(dev, state);
+       if (ops && ops->swdev_port_stp_update)
+               return ops->swdev_port_stp_update(dev, state);
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               err = netdev_switch_port_stp_update(lower_dev, state);
+               if (err && err != -EOPNOTSUPP)
+                       return err;
+       }
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
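
netdev_switch_port_stp_update() now falls through to lower devices: if the device has no swdev op of its own, the call recurses over its lowers, surfaces any real error immediately, and returns -EOPNOTSUPP only when nothing in the subtree handled the request. A self-contained sketch of that traversal policy, with invented types:

#include <errno.h>
#include <stdio.h>

struct dev {
        const char *name;
        int (*stp_update)(struct dev *dev, int state);  /* optional op */
        struct dev *lower[4];                           /* NULL-terminated */
};

static int port_stp_update(struct dev *dev, int state)
{
        int err = -EOPNOTSUPP;
        int i;

        if (dev->stp_update)
                return dev->stp_update(dev, state);

        for (i = 0; dev->lower[i]; i++) {
                err = port_stp_update(dev->lower[i], state);
                if (err && err != -EOPNOTSUPP)
                        return err;     /* a real failure stops the walk */
        }
        return err;     /* -EOPNOTSUPP if nothing below handled it */
}

static int hw_stp(struct dev *dev, int state)
{
        printf("%s: STP state -> %d\n", dev->name, state);
        return 0;
}

int main(void)
{
        struct dev sw = { .name = "sw0p1", .stp_update = hw_stp };
        struct dev bond = { .name = "bond0", .lower = { &sw } };

        return port_stp_update(&bond, 3) ? 1 : 0;
}
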