/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr,
				const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
97 static void tcp_v6_hash(struct sock *sk)
99 if (sk->sk_state != TCP_CLOSE) {
100 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
105 __inet6_hash(sk, NULL);
110 static __inline__ __sum16 tcp_v6_check(int len,
111 const struct in6_addr *saddr,
112 const struct in6_addr *daddr,
115 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
118 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
120 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121 ipv6_hdr(skb)->saddr.s6_addr32,
123 tcp_hdr(skb)->source);
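/* Active open: validate the sockaddr, handle flow labels and v4-mapped
 * destinations (falling back to tcp_v4_connect() for the mapped case),
 * look up a route, pick a source address, and finally hash the socket
 * and send the SYN via tcp_connect().
 */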
126 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
129 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130 struct inet_sock *inet = inet_sk(sk);
131 struct inet_connection_sock *icsk = inet_csk(sk);
132 struct ipv6_pinfo *np = inet6_sk(sk);
133 struct tcp_sock *tp = tcp_sk(sk);
134 struct in6_addr *saddr = NULL, *final_p, final;
137 struct dst_entry *dst;
141 if (addr_len < SIN6_LEN_RFC2133)
144 if (usin->sin6_family != AF_INET6)
145 return -EAFNOSUPPORT;
147 memset(&fl6, 0, sizeof(fl6));
150 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
151 IP6_ECN_flow_init(fl6.flowlabel);
152 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
153 struct ip6_flowlabel *flowlabel;
154 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155 if (flowlabel == NULL)
157 usin->sin6_addr = flowlabel->dst;
158 fl6_sock_release(flowlabel);
/*
 *	connect() to INADDR_ANY means loopback (BSD'ism).
 */
166 if(ipv6_addr_any(&usin->sin6_addr))
167 usin->sin6_addr.s6_addr[15] = 0x1;
169 addr_type = ipv6_addr_type(&usin->sin6_addr);
171 if(addr_type & IPV6_ADDR_MULTICAST)
174 if (addr_type&IPV6_ADDR_LINKLOCAL) {
175 if (addr_len >= sizeof(struct sockaddr_in6) &&
176 usin->sin6_scope_id) {
/* If the interface is set while binding, the indices
 * must coincide.
 */
180 if (sk->sk_bound_dev_if &&
181 sk->sk_bound_dev_if != usin->sin6_scope_id)
184 sk->sk_bound_dev_if = usin->sin6_scope_id;
187 /* Connect to link-local address requires an interface */
188 if (!sk->sk_bound_dev_if)
192 if (tp->rx_opt.ts_recent_stamp &&
193 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
194 tp->rx_opt.ts_recent = 0;
195 tp->rx_opt.ts_recent_stamp = 0;
199 np->daddr = usin->sin6_addr;
200 np->flow_label = fl6.flowlabel;
206 if (addr_type == IPV6_ADDR_MAPPED) {
207 u32 exthdrlen = icsk->icsk_ext_hdr_len;
208 struct sockaddr_in sin;
210 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
212 if (__ipv6_only_sock(sk))
215 sin.sin_family = AF_INET;
216 sin.sin_port = usin->sin6_port;
217 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
219 icsk->icsk_af_ops = &ipv6_mapped;
220 sk->sk_backlog_rcv = tcp_v4_do_rcv;
221 #ifdef CONFIG_TCP_MD5SIG
222 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
225 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
228 icsk->icsk_ext_hdr_len = exthdrlen;
229 icsk->icsk_af_ops = &ipv6_specific;
230 sk->sk_backlog_rcv = tcp_v6_do_rcv;
231 #ifdef CONFIG_TCP_MD5SIG
232 tp->af_specific = &tcp_sock_ipv6_specific;
236 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
237 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
244 if (!ipv6_addr_any(&np->rcv_saddr))
245 saddr = &np->rcv_saddr;
247 fl6.flowi6_proto = IPPROTO_TCP;
248 fl6.daddr = np->daddr;
249 fl6.saddr = saddr ? *saddr : np->saddr;
250 fl6.flowi6_oif = sk->sk_bound_dev_if;
251 fl6.flowi6_mark = sk->sk_mark;
252 fl6.fl6_dport = usin->sin6_port;
253 fl6.fl6_sport = inet->inet_sport;
255 final_p = fl6_update_dst(&fl6, np->opt, &final);
257 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
259 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
267 np->rcv_saddr = *saddr;
270 /* set the source address */
272 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
274 sk->sk_gso_type = SKB_GSO_TCPV6;
275 __ip6_dst_store(sk, dst, NULL, NULL);
277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 struct inet_peer *peer = rt6_get_peer(rt);
/*
 * VJ's idea. We save the last timestamp seen from
 * the destination in the peer table, when entering
 * state TIME-WAIT, and initialize rx_opt.ts_recent
 * from it when trying a new connection.
 */
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
297 icsk->icsk_ext_hdr_len = 0;
299 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
302 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
304 inet->inet_dport = usin->sin6_port;
306 tcp_set_state(sk, TCP_SYN_SENT);
307 err = inet6_hash_connect(&tcp_death_row, sk);
312 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
317 err = tcp_connect(sk);
324 tcp_set_state(sk, TCP_CLOSE);
327 inet->inet_dport = 0;
328 sk->sk_route_caps = 0;
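/* ICMPv6 error handler: map the error onto the owning socket.
 * ICMPV6_PKT_TOOBIG triggers an MSS update and a simple retransmit;
 * errors for embryonic (SYN_SENT/SYN_RECV) connections are reported to
 * the user or the pending request sock is dropped.
 */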
332 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 u8 type, u8 code, int offset, __be32 info)
335 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
336 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
337 struct ipv6_pinfo *np;
342 struct net *net = dev_net(skb->dev);
344 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
345 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
348 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
353 if (sk->sk_state == TCP_TIME_WAIT) {
354 inet_twsk_put(inet_twsk(sk));
359 if (sock_owned_by_user(sk))
360 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
362 if (sk->sk_state == TCP_CLOSE)
365 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
371 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
380 if (type == ICMPV6_PKT_TOOBIG) {
381 struct dst_entry *dst;
383 if (sock_owned_by_user(sk))
385 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
388 /* icmp should have updated the destination cache entry */
389 dst = __sk_dst_check(sk, np->dst_cookie);
392 struct inet_sock *inet = inet_sk(sk);
/* BUGGG_FUTURE: Again, it is not clear how
 * to handle the rthdr case. Ignore this complexity
 * for now.
 */
399 memset(&fl6, 0, sizeof(fl6));
400 fl6.flowi6_proto = IPPROTO_TCP;
401 fl6.daddr = np->daddr;
402 fl6.saddr = np->saddr;
403 fl6.flowi6_oif = sk->sk_bound_dev_if;
404 fl6.flowi6_mark = sk->sk_mark;
405 fl6.fl6_dport = inet->inet_dport;
406 fl6.fl6_sport = inet->inet_sport;
407 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
409 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
411 sk->sk_err_soft = -PTR_ERR(dst);
418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
419 tcp_sync_mss(sk, dst_mtu(dst));
420 tcp_simple_retransmit(sk);
421 } /* else let the usual retransmit timer handle it */
426 icmpv6_err_convert(type, code, &err);
/* Might be for a request_sock */
429 switch (sk->sk_state) {
430 struct request_sock *req, **prev;
432 if (sock_owned_by_user(sk))
435 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
436 &hdr->saddr, inet6_iif(skb));
/* ICMPs are not backlogged, hence we cannot get
 * an established socket here.
 */
443 WARN_ON(req->sk != NULL);
445 if (seq != tcp_rsk(req)->snt_isn) {
446 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
450 inet_csk_reqsk_queue_drop(sk, req, prev);
case TCP_SYN_RECV:  /* Cannot happen.
		       It can, e.g. if SYNs are crossed. --ANK */
456 if (!sock_owned_by_user(sk)) {
458 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
462 sk->sk_err_soft = err;
466 if (!sock_owned_by_user(sk) && np->recverr) {
468 sk->sk_error_report(sk);
470 sk->sk_err_soft = err;
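/* Build and transmit a SYN+ACK for a pending connection request:
 * construct the flow from the request sock, route it, and hand the
 * segment to ip6_xmit() together with the listener's IPv6 options.
 */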
478 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp)
481 struct inet6_request_sock *treq = inet6_rsk(req);
482 struct ipv6_pinfo *np = inet6_sk(sk);
483 struct sk_buff * skb;
484 struct ipv6_txoptions *opt = NULL;
485 struct in6_addr * final_p, final;
487 struct dst_entry *dst;
490 memset(&fl6, 0, sizeof(fl6));
491 fl6.flowi6_proto = IPPROTO_TCP;
492 fl6.daddr = treq->rmt_addr;
493 fl6.saddr = treq->loc_addr;
495 fl6.flowi6_oif = treq->iif;
496 fl6.flowi6_mark = sk->sk_mark;
497 fl6.fl6_dport = inet_rsk(req)->rmt_port;
498 fl6.fl6_sport = inet_rsk(req)->loc_port;
499 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
502 final_p = fl6_update_dst(&fl6, opt, &final);
504 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
510 skb = tcp_make_synack(sk, dst, req, rvp);
513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
515 fl6.daddr = treq->rmt_addr;
516 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
517 err = net_xmit_eval(err);
521 if (opt && opt != np->opt)
522 sock_kfree_s(sk, opt, opt->tot_len);
527 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
528 struct request_values *rvp)
530 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
531 return tcp_v6_send_synack(sk, req, rvp);
534 static void tcp_v6_reqsk_destructor(struct request_sock *req)
536 kfree_skb(inet6_rsk(req)->pktopts);
539 #ifdef CONFIG_TCP_MD5SIG
540 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
541 const struct in6_addr *addr)
543 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
546 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
547 struct sock *addr_sk)
549 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
552 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
553 struct request_sock *req)
555 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
558 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
561 struct tcp_md5sig cmd;
562 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
564 if (optlen < sizeof(cmd))
567 if (copy_from_user(&cmd, optval, sizeof(cmd)))
570 if (sin6->sin6_family != AF_INET6)
573 if (!cmd.tcpm_keylen) {
574 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
575 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
577 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
581 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
584 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
585 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
586 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
588 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
589 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
592 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
593 const struct in6_addr *daddr,
594 const struct in6_addr *saddr, int nbytes)
596 struct tcp6_pseudohdr *bp;
597 struct scatterlist sg;
599 bp = &hp->md5_blk.ip6;
600 /* 1. TCP pseudo-header (RFC2460) */
603 bp->protocol = cpu_to_be32(IPPROTO_TCP);
604 bp->len = cpu_to_be32(nbytes);
606 sg_init_one(&sg, bp, sizeof(*bp));
607 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
610 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
611 const struct in6_addr *daddr, struct in6_addr *saddr,
612 const struct tcphdr *th)
614 struct tcp_md5sig_pool *hp;
615 struct hash_desc *desc;
617 hp = tcp_get_md5sig_pool();
619 goto clear_hash_noput;
620 desc = &hp->md5_desc;
622 if (crypto_hash_init(desc))
624 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
626 if (tcp_md5_hash_header(hp, th))
628 if (tcp_md5_hash_key(hp, key))
630 if (crypto_hash_final(desc, md5_hash))
633 tcp_put_md5sig_pool();
637 tcp_put_md5sig_pool();
639 memset(md5_hash, 0, 16);
643 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
644 const struct sock *sk,
645 const struct request_sock *req,
646 const struct sk_buff *skb)
648 const struct in6_addr *saddr, *daddr;
649 struct tcp_md5sig_pool *hp;
650 struct hash_desc *desc;
651 const struct tcphdr *th = tcp_hdr(skb);
654 saddr = &inet6_sk(sk)->saddr;
655 daddr = &inet6_sk(sk)->daddr;
657 saddr = &inet6_rsk(req)->loc_addr;
658 daddr = &inet6_rsk(req)->rmt_addr;
660 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
661 saddr = &ip6h->saddr;
662 daddr = &ip6h->daddr;
665 hp = tcp_get_md5sig_pool();
667 goto clear_hash_noput;
668 desc = &hp->md5_desc;
670 if (crypto_hash_init(desc))
673 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
675 if (tcp_md5_hash_header(hp, th))
677 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
679 if (tcp_md5_hash_key(hp, key))
681 if (crypto_hash_final(desc, md5_hash))
684 tcp_put_md5sig_pool();
688 tcp_put_md5sig_pool();
690 memset(md5_hash, 0, 16);
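/* Verify the TCP-MD5 signature of an incoming segment against the key
 * (if any) configured for the peer address; segments with a missing,
 * unexpected or mismatching signature are counted and rejected.
 */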
694 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
696 const __u8 *hash_location = NULL;
697 struct tcp_md5sig_key *hash_expected;
698 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
699 const struct tcphdr *th = tcp_hdr(skb);
703 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
704 hash_location = tcp_parse_md5sig_option(th);
706 /* We've parsed the options - do we have a hash? */
707 if (!hash_expected && !hash_location)
710 if (hash_expected && !hash_location) {
711 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
715 if (!hash_expected && hash_location) {
716 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
720 /* check the signature */
721 genhash = tcp_v6_md5_hash_skb(newhash,
725 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
726 if (net_ratelimit()) {
727 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
728 genhash ? "failed" : "mismatch",
729 &ip6h->saddr, ntohs(th->source),
730 &ip6h->daddr, ntohs(th->dest));
738 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
740 .obj_size = sizeof(struct tcp6_request_sock),
741 .rtx_syn_ack = tcp_v6_rtx_synack,
742 .send_ack = tcp_v6_reqsk_send_ack,
743 .destructor = tcp_v6_reqsk_destructor,
744 .send_reset = tcp_v6_send_reset,
745 .syn_ack_timeout = tcp_syn_ack_timeout,
748 #ifdef CONFIG_TCP_MD5SIG
749 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
750 .md5_lookup = tcp_v6_reqsk_md5_lookup,
751 .calc_md5_hash = tcp_v6_md5_hash_skb,
755 static void __tcp_v6_send_check(struct sk_buff *skb,
756 const struct in6_addr *saddr, const struct in6_addr *daddr)
758 struct tcphdr *th = tcp_hdr(skb);
760 if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
762 skb->csum_start = skb_transport_header(skb) - skb->head;
763 skb->csum_offset = offsetof(struct tcphdr, check);
765 th->check = tcp_v6_check(skb->len, saddr, daddr,
766 csum_partial(th, th->doff << 2,
771 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
773 struct ipv6_pinfo *np = inet6_sk(sk);
775 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
778 static int tcp_v6_gso_send_check(struct sk_buff *skb)
780 const struct ipv6hdr *ipv6h;
783 if (!pskb_may_pull(skb, sizeof(*th)))
786 ipv6h = ipv6_hdr(skb);
790 skb->ip_summed = CHECKSUM_PARTIAL;
791 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
795 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
798 const struct ipv6hdr *iph = skb_gro_network_header(skb);
800 switch (skb->ip_summed) {
801 case CHECKSUM_COMPLETE:
802 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
804 skb->ip_summed = CHECKSUM_UNNECESSARY;
810 NAPI_GRO_CB(skb)->flush = 1;
814 return tcp_gro_receive(head, skb);
817 static int tcp6_gro_complete(struct sk_buff *skb)
819 const struct ipv6hdr *iph = ipv6_hdr(skb);
820 struct tcphdr *th = tcp_hdr(skb);
822 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
823 &iph->saddr, &iph->daddr, 0);
824 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
826 return tcp_gro_complete(skb);
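/* Common helper used for both RSTs and ACKs sent outside a full socket
 * context: it builds a bare TCP header (plus optional timestamp and MD5
 * options) and transmits it through the per-netns control socket.
 */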
829 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
830 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
832 const struct tcphdr *th = tcp_hdr(skb);
834 struct sk_buff *buff;
836 struct net *net = dev_net(skb_dst(skb)->dev);
837 struct sock *ctl_sk = net->ipv6.tcp_sk;
838 unsigned int tot_len = sizeof(struct tcphdr);
839 struct dst_entry *dst;
843 tot_len += TCPOLEN_TSTAMP_ALIGNED;
844 #ifdef CONFIG_TCP_MD5SIG
846 tot_len += TCPOLEN_MD5SIG_ALIGNED;
849 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
854 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
856 t1 = (struct tcphdr *) skb_push(buff, tot_len);
857 skb_reset_transport_header(buff);
859 /* Swap the send and the receive. */
860 memset(t1, 0, sizeof(*t1));
861 t1->dest = th->source;
862 t1->source = th->dest;
863 t1->doff = tot_len / 4;
864 t1->seq = htonl(seq);
865 t1->ack_seq = htonl(ack);
866 t1->ack = !rst || !th->ack;
868 t1->window = htons(win);
870 topt = (__be32 *)(t1 + 1);
873 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
874 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
875 *topt++ = htonl(tcp_time_stamp);
879 #ifdef CONFIG_TCP_MD5SIG
881 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
882 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
883 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
884 &ipv6_hdr(skb)->saddr,
885 &ipv6_hdr(skb)->daddr, t1);
889 memset(&fl6, 0, sizeof(fl6));
890 fl6.daddr = ipv6_hdr(skb)->saddr;
891 fl6.saddr = ipv6_hdr(skb)->daddr;
893 buff->ip_summed = CHECKSUM_PARTIAL;
896 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
898 fl6.flowi6_proto = IPPROTO_TCP;
899 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
900 fl6.flowi6_oif = inet6_iif(skb);
901 fl6.fl6_dport = t1->dest;
902 fl6.fl6_sport = t1->source;
903 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
/* Pass a socket to ip6_dst_lookup_flow even when the reply is an RST;
 * the underlying function uses it to retrieve the network
 * namespace.
 */
909 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
911 skb_dst_set(buff, dst);
912 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
913 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
915 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
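/* Send a RST in reply to a segment that has no owning socket.  With MD5
 * enabled and no socket known, the listening socket is looked up by
 * destination address so the incoming signature can be checked (and a
 * signed RST produced) before replying.
 */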
922 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
924 const struct tcphdr *th = tcp_hdr(skb);
925 u32 seq = 0, ack_seq = 0;
926 struct tcp_md5sig_key *key = NULL;
927 #ifdef CONFIG_TCP_MD5SIG
928 const __u8 *hash_location = NULL;
929 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
930 unsigned char newhash[16];
932 struct sock *sk1 = NULL;
938 if (!ipv6_unicast_destination(skb))
941 #ifdef CONFIG_TCP_MD5SIG
942 hash_location = tcp_parse_md5sig_option(th);
943 if (!sk && hash_location) {
/*
 * The active side is lost. Try to find the listening socket through
 * the source port, and then find the md5 key through the listening
 * socket. We are not losing security here: the incoming packet is
 * checked against the md5 hash of the found key, and no RST is
 * generated if the hash doesn't match.
 */
951 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
952 &tcp_hashinfo, &ipv6h->daddr,
953 ntohs(th->source), inet6_iif(skb));
958 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
962 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
963 if (genhash || memcmp(hash_location, newhash, 16) != 0)
966 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
971 seq = ntohl(th->ack_seq);
973 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
976 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
978 #ifdef CONFIG_TCP_MD5SIG
987 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
988 struct tcp_md5sig_key *key, u8 tclass)
990 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
993 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
995 struct inet_timewait_sock *tw = inet_twsk(sk);
996 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
998 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
999 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1000 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1006 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1007 struct request_sock *req)
1009 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1010 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
1014 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1016 struct request_sock *req, **prev;
1017 const struct tcphdr *th = tcp_hdr(skb);
1020 /* Find possible connection requests. */
1021 req = inet6_csk_search_req(sk, &prev, th->source,
1022 &ipv6_hdr(skb)->saddr,
1023 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1025 return tcp_check_req(sk, skb, req, prev);
1027 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1028 &ipv6_hdr(skb)->saddr, th->source,
1029 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1032 if (nsk->sk_state != TCP_TIME_WAIT) {
1036 inet_twsk_put(inet_twsk(nsk));
1040 #ifdef CONFIG_SYN_COOKIES
1042 sk = cookie_v6_check(sk, skb);
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
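/* Handle an incoming SYN on a listening IPv6 socket: allocate a request
 * sock, parse the options (including SYN cookies and, when enabled, TCP
 * cookie transactions), choose the ISN and reply with a SYN+ACK.
 */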
1050 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1052 struct tcp_extend_values tmp_ext;
1053 struct tcp_options_received tmp_opt;
1054 const u8 *hash_location;
1055 struct request_sock *req;
1056 struct inet6_request_sock *treq;
1057 struct ipv6_pinfo *np = inet6_sk(sk);
1058 struct tcp_sock *tp = tcp_sk(sk);
1059 __u32 isn = TCP_SKB_CB(skb)->when;
1060 struct dst_entry *dst = NULL;
1061 int want_cookie = 0;
1063 if (skb->protocol == htons(ETH_P_IP))
1064 return tcp_v4_conn_request(sk, skb);
1066 if (!ipv6_unicast_destination(skb))
1069 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1070 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1075 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1078 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1082 #ifdef CONFIG_TCP_MD5SIG
1083 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1086 tcp_clear_options(&tmp_opt);
1087 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1088 tmp_opt.user_mss = tp->rx_opt.user_mss;
1089 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1091 if (tmp_opt.cookie_plus > 0 &&
1092 tmp_opt.saw_tstamp &&
1093 !tp->rx_opt.cookie_out_never &&
1094 (sysctl_tcp_cookie_size > 0 ||
1095 (tp->cookie_values != NULL &&
1096 tp->cookie_values->cookie_desired > 0))) {
1099 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1100 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1102 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1105 /* Secret recipe starts with IP addresses */
1106 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1111 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1117 /* plus variable length Initiator Cookie */
1120 *c++ ^= *hash_location++;
1122 want_cookie = 0; /* not our kind of cookie */
1123 tmp_ext.cookie_out_never = 0; /* false */
1124 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1125 } else if (!tp->rx_opt.cookie_in_always) {
1126 /* redundant indications, but ensure initialization. */
1127 tmp_ext.cookie_out_never = 1; /* true */
1128 tmp_ext.cookie_plus = 0;
1132 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1134 if (want_cookie && !tmp_opt.saw_tstamp)
1135 tcp_clear_options(&tmp_opt);
1137 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1138 tcp_openreq_init(req, &tmp_opt, skb);
1140 treq = inet6_rsk(req);
1141 treq->rmt_addr = ipv6_hdr(skb)->saddr;
1142 treq->loc_addr = ipv6_hdr(skb)->daddr;
1143 if (!want_cookie || tmp_opt.tstamp_ok)
1144 TCP_ECN_create_request(req, tcp_hdr(skb));
1146 treq->iif = sk->sk_bound_dev_if;
1148 /* So that link locals have meaning */
1149 if (!sk->sk_bound_dev_if &&
1150 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1151 treq->iif = inet6_iif(skb);
1154 struct inet_peer *peer = NULL;
1156 if (ipv6_opt_accepted(sk, skb) ||
1157 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1158 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1159 atomic_inc(&skb->users);
1160 treq->pktopts = skb;
1164 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1165 req->cookie_ts = tmp_opt.tstamp_ok;
/* VJ's idea. We save the last timestamp seen
 * from the destination in the peer table, when entering
 * state TIME-WAIT, and check against it before
 * accepting a new connection request.
 *
 * If "isn" is not zero, this request hit an alive
 * timewait bucket, so all the necessary checks
 * are made in the function processing timewait state.
 */
1178 if (tmp_opt.saw_tstamp &&
1179 tcp_death_row.sysctl_tw_recycle &&
1180 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1181 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1182 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1184 inet_peer_refcheck(peer);
1185 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1186 (s32)(peer->tcp_ts - req->ts_recent) >
1188 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1189 goto drop_and_release;
1192 /* Kill the following clause, if you dislike this way. */
1193 else if (!sysctl_tcp_syncookies &&
1194 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1195 (sysctl_max_syn_backlog >> 2)) &&
1196 (!peer || !peer->tcp_ts_stamp) &&
1197 (!dst || !dst_metric(dst, RTAX_RTT))) {
/* Without syncookies the last quarter of the
 * backlog is filled with destinations proven
 * to be alive. It means that we continue to
 * communicate with destinations already remembered
 * at the moment of the synflood.
 */
1205 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1206 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1207 goto drop_and_release;
1210 isn = tcp_v6_init_sequence(skb);
1213 tcp_rsk(req)->snt_isn = isn;
1214 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1216 security_inet_conn_request(sk, skb, req);
1218 if (tcp_v6_send_synack(sk, req,
1219 (struct request_values *)&tmp_ext) ||
1223 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1231 return 0; /* don't send reset */
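/* Create the child socket once the three-way handshake completes.  The
 * v4-mapped branch reuses tcp_v4_syn_recv_sock() and then patches the
 * af_ops, while the native path copies addresses, IPv6 options and the
 * MD5 key from the listener and the request sock.
 */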
1234 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1235 struct request_sock *req,
1236 struct dst_entry *dst)
1238 struct inet6_request_sock *treq;
1239 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1240 struct tcp6_sock *newtcp6sk;
1241 struct inet_sock *newinet;
1242 struct tcp_sock *newtp;
1244 struct ipv6_txoptions *opt;
1245 #ifdef CONFIG_TCP_MD5SIG
1246 struct tcp_md5sig_key *key;
1249 if (skb->protocol == htons(ETH_P_IP)) {
1254 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1259 newtcp6sk = (struct tcp6_sock *)newsk;
1260 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1262 newinet = inet_sk(newsk);
1263 newnp = inet6_sk(newsk);
1264 newtp = tcp_sk(newsk);
1266 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1268 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1270 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1272 newnp->rcv_saddr = newnp->saddr;
1274 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1275 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1276 #ifdef CONFIG_TCP_MD5SIG
1277 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1280 newnp->ipv6_ac_list = NULL;
1281 newnp->ipv6_fl_list = NULL;
1282 newnp->pktoptions = NULL;
1284 newnp->mcast_oif = inet6_iif(skb);
1285 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1286 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
/*
 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
 * here, tcp_create_openreq_child now does this for us, see the comment in
 * that function for the gory details. -acme
 */
/* This is a tricky place. Until this moment the IPv4 tcp
 * worked with the IPv6 icsk.icsk_af_ops.
 * Sync it now.
 */
1298 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1303 treq = inet6_rsk(req);
1306 if (sk_acceptq_is_full(sk))
1310 dst = inet6_csk_route_req(sk, req);
1315 newsk = tcp_create_openreq_child(sk, req, skb);
/*
 * No need to charge this sock to the relevant IPv6 refcnt debug socks
 * count here, tcp_create_openreq_child now does this for us, see the
 * comment in that function for the gory details. -acme
 */
1325 newsk->sk_gso_type = SKB_GSO_TCPV6;
1326 __ip6_dst_store(newsk, dst, NULL, NULL);
1328 newtcp6sk = (struct tcp6_sock *)newsk;
1329 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1331 newtp = tcp_sk(newsk);
1332 newinet = inet_sk(newsk);
1333 newnp = inet6_sk(newsk);
1335 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1337 newnp->daddr = treq->rmt_addr;
1338 newnp->saddr = treq->loc_addr;
1339 newnp->rcv_saddr = treq->loc_addr;
1340 newsk->sk_bound_dev_if = treq->iif;
/* Now IPv6 options...
 *
 * First: no IPv4 options.
 */
1346 newinet->inet_opt = NULL;
1347 newnp->ipv6_ac_list = NULL;
1348 newnp->ipv6_fl_list = NULL;
1351 newnp->rxopt.all = np->rxopt.all;
1353 /* Clone pktoptions received with SYN */
1354 newnp->pktoptions = NULL;
1355 if (treq->pktopts != NULL) {
1356 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1357 kfree_skb(treq->pktopts);
1358 treq->pktopts = NULL;
1359 if (newnp->pktoptions)
1360 skb_set_owner_r(newnp->pktoptions, newsk);
1363 newnp->mcast_oif = inet6_iif(skb);
1364 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1365 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
/* Clone native IPv6 options from the listening socket (if any).
 *
 * Yes, keeping a reference count would be much more clever,
 * but we do one more thing here: reattach optmem to newsk.
 */
1374 newnp->opt = ipv6_dup_options(newsk, opt);
1376 sock_kfree_s(sk, opt, opt->tot_len);
1379 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1381 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1382 newnp->opt->opt_flen);
1384 tcp_mtup_init(newsk);
1385 tcp_sync_mss(newsk, dst_mtu(dst));
1386 newtp->advmss = dst_metric_advmss(dst);
1387 if (tcp_sk(sk)->rx_opt.user_mss &&
1388 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1389 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1391 tcp_initialize_rcv_mss(newsk);
1392 if (tcp_rsk(req)->snt_synack)
1393 tcp_valid_rtt_meas(newsk,
1394 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1395 newtp->total_retrans = req->retrans;
1397 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1398 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1400 #ifdef CONFIG_TCP_MD5SIG
1401 /* Copy over the MD5 key from the original socket */
1402 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
/* We're using one, so create a matching key
 * on the newsk structure. If we fail to get
 * memory, then we end up not copying the key
 * across. Shucks.
 */
1408 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1409 AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1413 if (__inet_inherit_port(sk, newsk) < 0) {
1417 __inet6_hash(newsk, NULL);
1422 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1424 if (opt && opt != np->opt)
1425 sock_kfree_s(sk, opt, opt->tot_len);
1428 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
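/* Initialize/verify the TCP checksum on receive: trust CHECKSUM_COMPLETE
 * when it matches, otherwise seed skb->csum with the pseudo-header and
 * fully verify short packets right away.
 */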
1432 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1434 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1435 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1436 &ipv6_hdr(skb)->daddr, skb->csum)) {
1437 skb->ip_summed = CHECKSUM_UNNECESSARY;
1442 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1443 &ipv6_hdr(skb)->saddr,
1444 &ipv6_hdr(skb)->daddr, 0));
1446 if (skb->len <= 76) {
1447 return __skb_checksum_complete(skb);
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1460 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1462 struct ipv6_pinfo *np = inet6_sk(sk);
1463 struct tcp_sock *tp;
1464 struct sk_buff *opt_skb = NULL;
/* Imagine: the socket is IPv6 and an IPv4 packet arrives,
 * goes to the IPv4 receive handler and is backlogged.
 * From the backlog it always ends up here. Kerboom...
 * Fortunately, tcp_rcv_established and rcv_established
 * handle them correctly, but it is not the case with
 * tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
 */
1474 if (skb->protocol == htons(ETH_P_IP))
1475 return tcp_v4_do_rcv(sk, skb);
1477 #ifdef CONFIG_TCP_MD5SIG
1478 if (tcp_v6_inbound_md5_hash (sk, skb))
1482 if (sk_filter(sk, skb))
/*
 * socket locking is here for SMP purposes as backlog rcv
 * is currently called with bh processing disabled.
 */
/* Do Stevens' IPV6_PKTOPTIONS.
 *
 * Yes, guys, it is the only place in our code where we
 * may make it not affect IPv4.
 * The rest of the code is protocol independent,
 * and I do not like the idea of uglifying IPv4.
 *
 * Actually, the whole idea behind IPV6_PKTOPTIONS
 * looks not very well thought out. For now we latch
 * the options received in the last packet enqueued
 * by tcp. Feel free to propose a better solution.
 */
1504 opt_skb = skb_clone(skb, GFP_ATOMIC);
1506 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1507 sock_rps_save_rxhash(sk, skb);
1508 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1511 goto ipv6_pktoptions;
1515 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1518 if (sk->sk_state == TCP_LISTEN) {
1519 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
/*
 * Queue it on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
1529 sock_rps_save_rxhash(nsk, skb);
1530 if (tcp_child_process(sk, nsk, skb))
1533 __kfree_skb(opt_skb);
1537 sock_rps_save_rxhash(sk, skb);
1539 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1542 goto ipv6_pktoptions;
1546 tcp_v6_send_reset(sk, skb);
1549 __kfree_skb(opt_skb);
1553 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
/* You may ask, what is this?
 *
 * 1. skb was enqueued by tcp.
 * 2. skb is added to the tail of the read queue, rather than out of order.
 * 3. The socket is not in a passive state.
 * 4. Finally, it really contains options which the user wants to receive.
 */
1566 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1567 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1568 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1569 np->mcast_oif = inet6_iif(opt_skb);
1570 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1571 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1572 if (np->rxopt.bits.rxtclass)
1573 np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1574 if (ipv6_opt_accepted(sk, opt_skb)) {
1575 skb_set_owner_r(opt_skb, sk);
1576 opt_skb = xchg(&np->pktoptions, opt_skb);
1578 __kfree_skb(opt_skb);
1579 opt_skb = xchg(&np->pktoptions, NULL);
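/* Main receive routine for IPv6 TCP segments: validate the header and
 * checksum, fill in the TCP control block, look up the socket and either
 * process the segment directly, prequeue it, or push it to the backlog.
 */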
1587 static int tcp_v6_rcv(struct sk_buff *skb)
1589 const struct tcphdr *th;
1590 const struct ipv6hdr *hdr;
1593 struct net *net = dev_net(skb->dev);
1595 if (skb->pkt_type != PACKET_HOST)
/*
 * Count it even if it's bad.
 */
1601 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1603 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1608 if (th->doff < sizeof(struct tcphdr)/4)
1610 if (!pskb_may_pull(skb, th->doff*4))
1613 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1617 hdr = ipv6_hdr(skb);
1618 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1619 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1620 skb->len - th->doff*4);
1621 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1622 TCP_SKB_CB(skb)->when = 0;
1623 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1624 TCP_SKB_CB(skb)->sacked = 0;
1626 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1631 if (sk->sk_state == TCP_TIME_WAIT)
1634 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1635 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1636 goto discard_and_relse;
1639 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1640 goto discard_and_relse;
1642 if (sk_filter(sk, skb))
1643 goto discard_and_relse;
1647 bh_lock_sock_nested(sk);
1649 if (!sock_owned_by_user(sk)) {
1650 #ifdef CONFIG_NET_DMA
1651 struct tcp_sock *tp = tcp_sk(sk);
1652 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1653 tp->ucopy.dma_chan = net_dma_find_channel();
1654 if (tp->ucopy.dma_chan)
1655 ret = tcp_v6_do_rcv(sk, skb);
1659 if (!tcp_prequeue(sk, skb))
1660 ret = tcp_v6_do_rcv(sk, skb);
1662 } else if (unlikely(sk_add_backlog(sk, skb))) {
1664 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1665 goto discard_and_relse;
1670 return ret ? -1 : 0;
1673 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1676 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1678 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1680 tcp_v6_send_reset(NULL, skb);
1697 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1698 inet_twsk_put(inet_twsk(sk));
1702 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1703 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1704 inet_twsk_put(inet_twsk(sk));
1708 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1713 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1714 &ipv6_hdr(skb)->daddr,
1715 ntohs(th->dest), inet6_iif(skb));
1717 struct inet_timewait_sock *tw = inet_twsk(sk);
1718 inet_twsk_deschedule(tw, &tcp_death_row);
1723 /* Fall through to ACK */
1726 tcp_v6_timewait_ack(sk, skb);
1730 case TCP_TW_SUCCESS:;
1735 static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1737 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1738 struct ipv6_pinfo *np = inet6_sk(sk);
1739 struct inet_peer *peer;
1742 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1743 peer = inet_getpeer_v6(&np->daddr, 1);
1747 rt6_bind_peer(rt, 1);
1748 peer = rt->rt6i_peer;
1749 *release_it = false;
1755 static void *tcp_v6_tw_get_peer(struct sock *sk)
1757 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1758 const struct inet_timewait_sock *tw = inet_twsk(sk);
1760 if (tw->tw_family == AF_INET)
1761 return tcp_v4_tw_get_peer(sk);
1763 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1766 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1767 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1768 .twsk_unique = tcp_twsk_unique,
1769 .twsk_destructor= tcp_twsk_destructor,
1770 .twsk_getpeer = tcp_v6_tw_get_peer,
1773 static const struct inet_connection_sock_af_ops ipv6_specific = {
1774 .queue_xmit = inet6_csk_xmit,
1775 .send_check = tcp_v6_send_check,
1776 .rebuild_header = inet6_sk_rebuild_header,
1777 .conn_request = tcp_v6_conn_request,
1778 .syn_recv_sock = tcp_v6_syn_recv_sock,
1779 .get_peer = tcp_v6_get_peer,
1780 .net_header_len = sizeof(struct ipv6hdr),
1781 .setsockopt = ipv6_setsockopt,
1782 .getsockopt = ipv6_getsockopt,
1783 .addr2sockaddr = inet6_csk_addr2sockaddr,
1784 .sockaddr_len = sizeof(struct sockaddr_in6),
1785 .bind_conflict = inet6_csk_bind_conflict,
1786 #ifdef CONFIG_COMPAT
1787 .compat_setsockopt = compat_ipv6_setsockopt,
1788 .compat_getsockopt = compat_ipv6_getsockopt,
1792 #ifdef CONFIG_TCP_MD5SIG
1793 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1794 .md5_lookup = tcp_v6_md5_lookup,
1795 .calc_md5_hash = tcp_v6_md5_hash_skb,
1796 .md5_parse = tcp_v6_parse_md5_keys,
1801 * TCP over IPv4 via INET6 API
1804 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1805 .queue_xmit = ip_queue_xmit,
1806 .send_check = tcp_v4_send_check,
1807 .rebuild_header = inet_sk_rebuild_header,
1808 .conn_request = tcp_v6_conn_request,
1809 .syn_recv_sock = tcp_v6_syn_recv_sock,
1810 .get_peer = tcp_v4_get_peer,
1811 .net_header_len = sizeof(struct iphdr),
1812 .setsockopt = ipv6_setsockopt,
1813 .getsockopt = ipv6_getsockopt,
1814 .addr2sockaddr = inet6_csk_addr2sockaddr,
1815 .sockaddr_len = sizeof(struct sockaddr_in6),
1816 .bind_conflict = inet6_csk_bind_conflict,
1817 #ifdef CONFIG_COMPAT
1818 .compat_setsockopt = compat_ipv6_setsockopt,
1819 .compat_getsockopt = compat_ipv6_getsockopt,
1823 #ifdef CONFIG_TCP_MD5SIG
1824 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1825 .md5_lookup = tcp_v4_md5_lookup,
1826 .calc_md5_hash = tcp_v4_md5_hash_skb,
1827 .md5_parse = tcp_v6_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1834 static int tcp_v6_init_sock(struct sock *sk)
1836 struct inet_connection_sock *icsk = inet_csk(sk);
1837 struct tcp_sock *tp = tcp_sk(sk);
1839 skb_queue_head_init(&tp->out_of_order_queue);
1840 tcp_init_xmit_timers(sk);
1841 tcp_prequeue_init(tp);
1843 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1844 tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the
 * initial SYN frame in their delayed-ACK and congestion control
 * algorithms that we must have the following bandaid to talk
 * efficiently to them.  -DaveM
 */
/* See draft-stevens-tcpca-spec-01 for discussion of the
 * initialization of these values.
 */
1856 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1857 tp->snd_cwnd_clamp = ~0;
1858 tp->mss_cache = TCP_MSS_DEFAULT;
1860 tp->reordering = sysctl_tcp_reordering;
1862 sk->sk_state = TCP_CLOSE;
1864 icsk->icsk_af_ops = &ipv6_specific;
1865 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1866 icsk->icsk_sync_mss = tcp_sync_mss;
1867 sk->sk_write_space = sk_stream_write_space;
1868 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1870 #ifdef CONFIG_TCP_MD5SIG
1871 tp->af_specific = &tcp_sock_ipv6_specific;
1874 /* TCP Cookie Transactions */
1875 if (sysctl_tcp_cookie_size > 0) {
1876 /* Default, cookies without s_data_payload. */
1878 kzalloc(sizeof(*tp->cookie_values),
1880 if (tp->cookie_values != NULL)
1881 kref_init(&tp->cookie_values->kref);
/* Presumed zeroed, in order of appearance:
 *	cookie_in_always, cookie_out_never,
 *	s_data_constant, s_data_in, s_data_out
 */
1887 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1888 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1891 sock_update_memcg(sk);
1892 sk_sockets_allocated_inc(sk);
1898 static void tcp_v6_destroy_sock(struct sock *sk)
1900 tcp_v4_destroy_sock(sk);
1901 inet6_destroy_sock(sk);
1904 #ifdef CONFIG_PROC_FS
1905 /* Proc filesystem TCPv6 sock list dumping. */
1906 static void get_openreq6(struct seq_file *seq,
1907 const struct sock *sk, struct request_sock *req, int i, int uid)
1909 int ttd = req->expires - jiffies;
1910 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1911 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1917 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1918 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1920 src->s6_addr32[0], src->s6_addr32[1],
1921 src->s6_addr32[2], src->s6_addr32[3],
1922 ntohs(inet_rsk(req)->loc_port),
1923 dest->s6_addr32[0], dest->s6_addr32[1],
1924 dest->s6_addr32[2], dest->s6_addr32[3],
1925 ntohs(inet_rsk(req)->rmt_port),
1927 0,0, /* could print option size, but that is af dependent. */
1928 1, /* timers active (only the expire timer) */
1929 jiffies_to_clock_t(ttd),
1932 0, /* non standard timer */
1933 0, /* open_requests have no inode */
1937 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1939 const struct in6_addr *dest, *src;
1942 unsigned long timer_expires;
1943 const struct inet_sock *inet = inet_sk(sp);
1944 const struct tcp_sock *tp = tcp_sk(sp);
1945 const struct inet_connection_sock *icsk = inet_csk(sp);
1946 const struct ipv6_pinfo *np = inet6_sk(sp);
1949 src = &np->rcv_saddr;
1950 destp = ntohs(inet->inet_dport);
1951 srcp = ntohs(inet->inet_sport);
1953 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1955 timer_expires = icsk->icsk_timeout;
1956 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1958 timer_expires = icsk->icsk_timeout;
1959 } else if (timer_pending(&sp->sk_timer)) {
1961 timer_expires = sp->sk_timer.expires;
1964 timer_expires = jiffies;
1968 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1969 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1971 src->s6_addr32[0], src->s6_addr32[1],
1972 src->s6_addr32[2], src->s6_addr32[3], srcp,
1973 dest->s6_addr32[0], dest->s6_addr32[1],
1974 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1976 tp->write_seq-tp->snd_una,
1977 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1979 jiffies_to_clock_t(timer_expires - jiffies),
1980 icsk->icsk_retransmits,
1982 icsk->icsk_probes_out,
1984 atomic_read(&sp->sk_refcnt), sp,
1985 jiffies_to_clock_t(icsk->icsk_rto),
1986 jiffies_to_clock_t(icsk->icsk_ack.ato),
1987 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1989 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1993 static void get_timewait6_sock(struct seq_file *seq,
1994 struct inet_timewait_sock *tw, int i)
1996 const struct in6_addr *dest, *src;
1998 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1999 int ttd = tw->tw_ttd - jiffies;
2004 dest = &tw6->tw_v6_daddr;
2005 src = &tw6->tw_v6_rcv_saddr;
2006 destp = ntohs(tw->tw_dport);
2007 srcp = ntohs(tw->tw_sport);
2010 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2011 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2013 src->s6_addr32[0], src->s6_addr32[1],
2014 src->s6_addr32[2], src->s6_addr32[3], srcp,
2015 dest->s6_addr32[0], dest->s6_addr32[1],
2016 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2017 tw->tw_substate, 0, 0,
2018 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2019 atomic_read(&tw->tw_refcnt), tw);
2022 static int tcp6_seq_show(struct seq_file *seq, void *v)
2024 struct tcp_iter_state *st;
2026 if (v == SEQ_START_TOKEN) {
2031 "st tx_queue rx_queue tr tm->when retrnsmt"
2032 " uid timeout inode\n");
2037 switch (st->state) {
2038 case TCP_SEQ_STATE_LISTENING:
2039 case TCP_SEQ_STATE_ESTABLISHED:
2040 get_tcp6_sock(seq, v, st->num);
2042 case TCP_SEQ_STATE_OPENREQ:
2043 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2045 case TCP_SEQ_STATE_TIME_WAIT:
2046 get_timewait6_sock(seq, v, st->num);
2053 static const struct file_operations tcp6_afinfo_seq_fops = {
2054 .owner = THIS_MODULE,
2055 .open = tcp_seq_open,
2057 .llseek = seq_lseek,
2058 .release = seq_release_net
2061 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2064 .seq_fops = &tcp6_afinfo_seq_fops,
2066 .show = tcp6_seq_show,
2070 int __net_init tcp6_proc_init(struct net *net)
2072 return tcp_proc_register(net, &tcp6_seq_afinfo);
2075 void tcp6_proc_exit(struct net *net)
2077 tcp_proc_unregister(net, &tcp6_seq_afinfo);
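/* Protocol descriptor tying the generic TCP entry points to the
 * IPv6-specific connect/receive/hash handlers defined above.
 */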
2081 struct proto tcpv6_prot = {
2083 .owner = THIS_MODULE,
2085 .connect = tcp_v6_connect,
2086 .disconnect = tcp_disconnect,
2087 .accept = inet_csk_accept,
2089 .init = tcp_v6_init_sock,
2090 .destroy = tcp_v6_destroy_sock,
2091 .shutdown = tcp_shutdown,
2092 .setsockopt = tcp_setsockopt,
2093 .getsockopt = tcp_getsockopt,
2094 .recvmsg = tcp_recvmsg,
2095 .sendmsg = tcp_sendmsg,
2096 .sendpage = tcp_sendpage,
2097 .backlog_rcv = tcp_v6_do_rcv,
2098 .hash = tcp_v6_hash,
2099 .unhash = inet_unhash,
2100 .get_port = inet_csk_get_port,
2101 .enter_memory_pressure = tcp_enter_memory_pressure,
2102 .sockets_allocated = &tcp_sockets_allocated,
2103 .memory_allocated = &tcp_memory_allocated,
2104 .memory_pressure = &tcp_memory_pressure,
2105 .orphan_count = &tcp_orphan_count,
2106 .sysctl_wmem = sysctl_tcp_wmem,
2107 .sysctl_rmem = sysctl_tcp_rmem,
2108 .max_header = MAX_TCP_HEADER,
2109 .obj_size = sizeof(struct tcp6_sock),
2110 .slab_flags = SLAB_DESTROY_BY_RCU,
2111 .twsk_prot = &tcp6_timewait_sock_ops,
2112 .rsk_prot = &tcp6_request_sock_ops,
2113 .h.hashinfo = &tcp_hashinfo,
2114 .no_autobind = true,
2115 #ifdef CONFIG_COMPAT
2116 .compat_setsockopt = compat_tcp_setsockopt,
2117 .compat_getsockopt = compat_tcp_getsockopt,
2119 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2120 .proto_cgroup = tcp_proto_cgroup,
2124 static const struct inet6_protocol tcpv6_protocol = {
2125 .handler = tcp_v6_rcv,
2126 .err_handler = tcp_v6_err,
2127 .gso_send_check = tcp_v6_gso_send_check,
2128 .gso_segment = tcp_tso_segment,
2129 .gro_receive = tcp6_gro_receive,
2130 .gro_complete = tcp6_gro_complete,
2131 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2134 static struct inet_protosw tcpv6_protosw = {
2135 .type = SOCK_STREAM,
2136 .protocol = IPPROTO_TCP,
2137 .prot = &tcpv6_prot,
2138 .ops = &inet6_stream_ops,
2140 .flags = INET_PROTOSW_PERMANENT |
2144 static int __net_init tcpv6_net_init(struct net *net)
2146 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2147 SOCK_RAW, IPPROTO_TCP, net);
2150 static void __net_exit tcpv6_net_exit(struct net *net)
2152 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2155 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2157 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2160 static struct pernet_operations tcpv6_net_ops = {
2161 .init = tcpv6_net_init,
2162 .exit = tcpv6_net_exit,
2163 .exit_batch = tcpv6_net_exit_batch,
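/* Module init/exit: register the IPv6 TCP protocol handler, the protosw
 * entry for SOCK_STREAM sockets and the per-netns control socket, and
 * tear them down in reverse order on failure or exit.
 */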
2166 int __init tcpv6_init(void)
2170 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2174 /* register inet6 protocol */
2175 ret = inet6_register_protosw(&tcpv6_protosw);
2177 goto out_tcpv6_protocol;
2179 ret = register_pernet_subsys(&tcpv6_net_ops);
2181 goto out_tcpv6_protosw;
2186 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2188 inet6_unregister_protosw(&tcpv6_protosw);
2192 void tcpv6_exit(void)
2194 unregister_pernet_subsys(&tcpv6_net_ops);
2195 inet6_unregister_protosw(&tcpv6_protosw);
2196 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);