/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

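/*
 * Illustrative userspace sketch (not part of this file): connecting to a
 * link-local peer, which requires a scope id per the -EINVAL rule enforced
 * in tcp_v6_connect() above. The address and interface name below are
 * made-up examples.
 *
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
 *				    .sin6_port = htons(80) };
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *
 *	inet_pton(AF_INET6, "fe80::1", &dst.sin6_addr);
 *	dst.sin6_scope_id = if_nametoindex("eth0");
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *		perror("connect");
 */
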
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

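/*
 * Illustrative userspace sketch (not part of this file): installing an
 * MD5 key for a peer with the TCP_MD5SIG socket option, which lands in
 * tcp_v6_parse_md5_keys() above. The address and key are made up.
 *
 *	struct tcp_md5sig md5 = { 0 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
 *		perror("TCP_MD5SIG");
 */
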
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

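/*
 * For reference, the pseudo-header hashed above mirrors the RFC 2460
 * upper-layer checksum pseudo-header; struct tcp6_pseudohdr (net/tcp.h)
 * lays it out as:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;
 *		__be32		protocol;	(includes padding)
 *	};
 */
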
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
	return false;
}
#endif

static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};

static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when the reply is a RST;
	 * the underlying function uses it to retrieve the network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and the incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key; no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req) {
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk || nsk == sk)
			reqsk_put(req);
		return nsk;
	}
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));
	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any)

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

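/*
 * Worked example for the end_seq computation above (illustrative): SYN
 * and FIN each occupy one unit of sequence space, so a segment with
 * seq = 1000 carrying 100 bytes of payload and no flags gives
 * end_seq = 1000 + 0 + 0 + 100 = 1100, while a bare SYN with no payload
 * gives end_seq = seq + 1.
 */
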
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if
	 * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
	 * called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct request_sock *req, int i, kuid_t uid)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

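/*
 * Illustrative (hand-written, not captured) /proc/net/tcp6 entry as
 * emitted by get_tcp6_sock() above; a listener on [::]:22 might show as:
 *
 *   0: 00000000000000000000000000000000:0016 00000000000000000000000000000000:0000
 *      0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 ...
 *
 * where 0A is TCP_LISTEN and 0016 is port 22 in hex.
 */
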
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}