/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
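
/*
 * Illustrative note (not part of the original file): the reuse decision
 * above is what lets an actively-closing client re-bind a port pair that
 * is still in TIME-WAIT, once the administrator enables the gating sysctl
 * read through sysctl_tcp_tw_reuse:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse
 *
 * With it set, a connect() that collides with a TIME-WAIT bucket is
 * allowed as long as the bucket's last timestamp is more than one second
 * old, and the new incarnation starts its sequence space at
 * tw_snd_nxt + 65535 + 2, so the two incarnations cannot overlap.
 */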
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
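
/*
 * Illustrative sketch (not part of the original file): tcp_v4_connect()
 * is what ultimately services a userspace connect(2) on an IPv4 stream
 * socket. The address and port below are hypothetical.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
 *		perror("connect");
 *
 * A sockaddr shorter than sizeof(struct sockaddr_in) yields EINVAL and a
 * non-AF_INET family yields EAFNOSUPPORT, matching the checks above;
 * ENETUNREACH surfaces when the route lookup fails.
 */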
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
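
/*
 * Worked example (illustrative, not part of the original file): if a
 * router on the path reports an MTU of 1400 while icsk_pmtu_cookie is
 * still 1500, tcp_sync_mss() shrinks the MSS to roughly
 * 1400 - 40 = 1360 bytes (20-byte IPv4 header plus 20-byte TCP header,
 * before options), and tcp_simple_retransmit() resends the queued
 * segments at the new size instead of waiting for the retransmit timer.
 */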
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *req;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	req = tp->fastopen_rsk;
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		/* XXX (TFO) - revisit the following logic for TFO */

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:	/* Cannot happen.
				   It can f.e. if SYNs crossed,
				   or Fast Open.
				 */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
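
/*
 * Illustrative sketch (not part of the original file): the "icmp type
 * << 8 | icmp code" convention from the header comment means a
 * Destination Unreachable / Fragmentation Needed message is encoded as
 *
 *	int type = ICMP_DEST_UNREACH;	(value 3)
 *	int code = ICMP_FRAG_NEEDED;	(value 4)
 *	int err  = (type << 8) | code;	(value 0x304)
 *
 * while the errno reported to the user comes from
 * icmp_err_convert[code].errno, e.g. EMSGSIZE for the PMTU case.
 */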
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
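
/*
 * Worked example (illustrative, not part of the original file): for a
 * reset answering a segment that carried no ACK, the code above sets
 *
 *	rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
 *			       + skb->len - (th->doff << 2));
 *
 * so an incoming SYN with seq = 1000 and no payload is answered with
 * ack_seq = 1001 (the SYN flag occupies one sequence number), while a
 * segment that did carry an ACK is simply answered with
 * rep.th.seq = th->ack_seq and no ACK flag at all.
 */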
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      u16 queue_mapping)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
	int res = tcp_v4_send_synack(sk, NULL, req, 0);

	if (!res) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
	}
	return res;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
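
/*
 * Illustrative note (not part of the original file): the warning above
 * fires at most once per listening socket; whether cookies are actually
 * sent is governed by the tcp_syncookies sysctl:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_syncookies
 *
 * 0 disables cookies (requests are dropped under flood), 1 sends them
 * only once the SYN queue overflows, and 2 sends them unconditionally,
 * which is why the "sysctl_tcp_syncookies == 2 ... && !isn" test appears
 * in tcp_v4_conn_request() below.
 */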
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
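
/*
 * Illustrative sketch (not part of the original file): userspace reaches
 * tcp_md5_do_add()/tcp_md5_do_del() through the TCP_MD5SIG socket option,
 * parsed by tcp_v4_parse_md5_keys() below. The peer address and key here
 * are hypothetical.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.2", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing a zero tcpm_keylen instead deletes the key for that address,
 * and keys longer than TCP_MD5SIG_MAXKEYLEN are rejected with EINVAL.
 */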
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if ((sysctl_tcp_syncookies == 2 ||
	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->ir_loc_addr = daddr;
	ireq->ir_rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
		     ireq->ir_rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_fastopen_create_child(sk, skb, skb_synack, req))
		goto drop_and_release;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
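
/*
 * Illustrative note (not part of the original file): the two "queue full"
 * tests above correspond to the two queues a listener owns. Given a
 * hypothetical server that called
 *
 *	listen(fd, 128);
 *
 * the accept backlog (sk_acceptq_is_full()) holds up to 128 fully
 * established children waiting for accept(2), while the SYN queue
 * (inet_csk_reqsk_queue_is_full()) holds embryonic requests that have
 * not finished the handshake; only overflow of the latter can be
 * absorbed by syncookies.
 */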
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->ir_rmt_addr;
	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));
	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);
	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	skb_dst_force(skb);
	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
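
/*
 * Illustrative note (not part of the original file): prequeueing is
 * bypassed entirely when the administrator sets
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_low_latency
 *
 * since the first test above then always fails and every segment is
 * processed immediately in softirq context rather than being deferred
 * to the reading task.
 */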
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we don't lock the socket, we might find a transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}
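
/*
 * Illustrative sketch (not part of the original file): a row emitted by
 * get_tcp4_sock() above looks like (values hypothetical, tail omitted):
 *
 *	0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 ...
 *
 * where 0100007F:0016 is 127.0.0.1:22 (the address is a raw __be32
 * printed with %08X, so it reads byte-reversed on little-endian hosts)
 * and 0A is TCP_LISTEN.
 */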
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}
static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};
int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}