/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and
 *	Alexey Kuznetsov	:	support the IPV6_V6ONLY socket option,
 *					which allows both IPv4 and IPv6 sockets
 *					to bind a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;

static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
	struct dst_entry *dst = skb_dst(skb);

	const struct rt6_info *rt = (const struct rt6_info *)dst;

	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->source);
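
/*
 * For orientation only: a sketch of what the helper above computes.
 * The exact mixing lives in net/core/secure_seq.c, so treat the
 * formula below as an approximation, not the implementation:
 *
 *	isn = keyed_hash(per-boot secret, saddr, daddr, sport, dport)
 *	      + fine-grained clock component;
 *
 * i.e. the initial sequence number is hard to predict per 4-tuple,
 * yet still advances monotonically for any given tuple.
 */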
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct dst_entry *dst;

	if (addr_len < SIN6_LEN_RFC2133)

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
	IP6_ECN_flow_init(fl6.flowlabel);
	if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
		struct ip6_flowlabel *flowlabel;

		flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
		fl6_sock_release(flowlabel);

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)

			sk->sk_bound_dev_if = usin->sin6_scope_id;

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;

		np->saddr = sk->sk_v6_rcv_saddr;

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);

		sk->sk_v6_rcv_saddr = *saddr;

	/* set the source address */
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,

	err = tcp_connect(sk);

	tcp_set_state(sk, TCP_CLOSE);
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;

static void tcp_v6_mtu_reduced(struct sock *sk)
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));

	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);

	/* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			dst->ops->redirect(dst, sk, skb);

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)

		if (!ip6_sk_accept_pmtu(sk))

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)

		if (!sock_owned_by_user(sk)) {
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			sk->sk_err_soft = err;

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_error_report(sk);

		sk->sk_err_soft = err;

static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);

static void tcp_v6_reqsk_destructor(struct request_sock *req)
	kfree_skb(inet_rsk(req)->pktopts);

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))

	if (copy_from_user(&cmd, optval, sizeof(cmd)))

	if (sin6->sin6_family != AF_INET6)

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
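
/*
 * For reference, the option parsed above is installed from user space
 * roughly like this (a hedged sketch; "peer_sin6" and "fd" are
 * placeholders, the field names follow struct tcp_md5sig):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = strlen("secret") };
 *
 *	memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A v4-mapped tcpm_addr installs an AF_INET key, any other address an
 * AF_INET6 key, matching the two branches above.
 */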
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC 2460) */
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
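
/*
 * Layout of the pseudo-header hashed above (per the upper-layer
 * checksum rules of RFC 2460; tcp6_pseudohdr mirrors it):
 *
 *	saddr (16 bytes) | daddr (16 bytes) | len (4 bytes) | protocol (4 bytes)
 *
 * so the signature covers the addresses and segment length in addition
 * to the TCP header and payload hashed by the callers below.
 */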
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
	if (tcp_md5_hash_header(hp, th))
	if (tcp_md5_hash_key(hp, key))
	if (crypto_hash_final(desc, md5_hash))

	tcp_put_md5sig_pool();

	tcp_put_md5sig_pool();
	memset(md5_hash, 0, 16);

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;

	hp = tcp_get_md5sig_pool();
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
	if (tcp_md5_hash_header(hp, th))
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
	if (tcp_md5_hash_key(hp, key))
	if (crypto_hash_final(desc, md5_hash))

	tcp_put_md5sig_pool();

	tcp_put_md5sig_pool();
	memset(md5_hash, 0, 16);
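
/*
 * Note the split above: tcp_v6_md5_hash_hdr() signs a locally built
 * header only (used by tcp_v6_send_response() for RST/ACK replies that
 * carry no payload), while tcp_v6_md5_hash_skb() walks a full skb and
 * is used both to verify inbound segments and, via .calc_md5_hash, to
 * sign outgoing SYNACKs.
 */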
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  const struct request_sock *req,
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
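
/*
 * The table above is consumed by the protocol-independent request
 * path: tcp_conn_request() invokes ->init_req, ->route_req, ->init_seq
 * and ->send_synack (roughly in that order), so everything
 * IPv6-specific about SYN handling is funnelled through these hooks.
 */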
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
	const struct tcphdr *th = tcp_hdr(skb);
	struct sk_buff *buff;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;

		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
		tot_len += TCPOLEN_MD5SIG_ALIGNED;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *)skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);

#ifdef CONFIG_TCP_MD5SIG
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);

		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	struct sock *sk1 = NULL;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked with the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
	key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;

		seq = ntohl(th->ack_seq);
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),

static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

		sk = cookie_v6_check(sk, skb);

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */

static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;

	if (skb->protocol == htons(ETH_P_IP)) {

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 tcp code
		   worked with the IPv6 icsk.icsk_af_ops.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))

	dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);

	newsk = tcp_create_openreq_child(sk, req, skb);

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);

	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: we reattach the optmem charge to the new
	   socket.
	 */
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);

	__inet_hash(newsk, NULL);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   can make it not affect IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
	 */

	opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);

			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				sk->sk_rx_dst = NULL;

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		goto ipv6_pktoptions;

	if (tcp_checksum_complete(skb))

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				__kfree_skb(opt_skb);

	sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
	goto ipv6_pktoptions;

	tcp_v6_send_reset(sk, skb);
		__kfree_skb(opt_skb);

	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);

	/* You may ask: what is all this?

	   We latch the options only when:
	   1. the skb was enqueued by tcp.
	   2. the skb was added to the tail of the read queue, rather
	      than out of order.
	   3. the socket is not in a passive state.
	   4. Finally, it really contains options which the user
	      wants to receive.
	 */
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
	/* This is tricky: we move IP6CB to its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

static void tcp_v6_restore_cb(struct sk_buff *skb)
	/* We need to move the header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));

static int tcp_v6_rcv(struct sk_buff *skb)
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))

	if (th->doff < sizeof(struct tcphdr) / 4)
	if (!pskb_may_pull(skb, th->doff * 4))

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))

	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,

	if (sk->sk_state == TCP_TIME_WAIT)

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk = NULL;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {

		if (sk->sk_state == TCP_LISTEN)
			nsk = tcp_check_req(sk, skb, req, false);

			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;

	return ret ? -1 : 0;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	tcp_v6_send_reset(NULL, skb);

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			tcp_v6_restore_cb(skb);
		/* Fall through to ACK */
		tcp_v6_timewait_ack(sk, skb);
		tcp_v6_restore_cb(skb);
	case TCP_TW_SUCCESS:

static void tcp_v6_early_demux(struct sk_buff *skb)
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;

	if (skb->pkt_type != PACKET_HOST)

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))

	hdr = ipv6_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)

	/* Note: we use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),

		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
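
/*
 * Early demux runs from the IPv6 input path before the regular
 * protocol handler: on a hit it attaches the established socket and,
 * when the cached rx dst is still valid, its route to the skb, so
 * tcp_v6_rcv() can skip both the socket hash lookup and the routing
 * decision for packets on established connections.
 */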
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
	.mtu_reduced	   = tcp_v6_mtu_reduced,

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
	.mtu_reduced	   = tcp_v4_mtu_reduced,

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,

/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;

static void tcp_v6_destroy_sock(struct sock *sk)
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
	const struct in6_addr *dest, *src;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;

	dest = &sp->sk_v6_daddr;
	src = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_expires = sp->sk_timer.expires;
		timer_expires = jiffies;

		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog :
						  (tp->rcv_nxt - tp->copied_seq),
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   sp->sk_state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;

	dest = &tw->tw_v6_daddr;
	src = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);

static int tcp6_seq_show(struct seq_file *seq, void *v)
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   " uid  timeout inode\n");

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
		get_tcp6_sock(seq, v, st->num);

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= tcp_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release_net

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.show		= tcp6_seq_show,

int __net_init tcp6_proc_init(struct net *net)
	return tcp_proc_register(net, &tcp6_seq_afinfo);

void tcp6_proc_exit(struct net *net)
	tcp_proc_unregister(net, &tcp6_seq_afinfo);

static void tcp_v6_clear_sk(struct sock *sk, int size)
	struct inet_sock *inet = inet_sk(sk);

	/* We do not want to clear the pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);

struct proto tcpv6_prot = {
	.owner			= THIS_MODULE,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
	.clear_sk		= tcp_v6_clear_sk,

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |

static int __net_init tcpv6_net_init(struct net *net)
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);

static void __net_exit tcpv6_net_exit(struct net *net)
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,

int __init tcpv6_init(void)
	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
		goto out_tcpv6_protosw;

	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);

void tcpv6_exit(void)
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);