/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *		Juan Jose Ciarlante:	ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year
 *					coma.
 *		Andi Kleen :		Fix new listen.
 *		Andi Kleen :		Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's: the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used
	   as the state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
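		/* Pick the new write_seq far enough past what the old
		 * incarnation could still have in flight: its snd_nxt
		 * plus one maximal window (65535) plus two.
		 */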
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the
	 * hash tables and complete initialization afterwards.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);
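	/* Seed the IP ID counter from the (secret) initial sequence
	 * number mixed with jiffies, so the ID sequence does not start
	 * at a value an observer could trivially predict.
	 */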
	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC 1191.
 * It can be called through tcp_release_cb() if the socket was owned by the
 * user at the time tcp_v4_err() was called to handle the ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *req;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	req = tp->fastopen_rsk;
	seq = ntohl(th->seq);
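	/* Act on the error only if its echoed sequence number falls in
	 * the window of data we actually have in flight (or matches a
	 * Fast Open request's ISN); anything else looks like a spoofed
	 * or stale ICMP and is merely counted.
	 */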
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		/* XXX (TFO) - revisit the following logic for TFO */

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now. */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}
	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, e.g., if SYNs crossed,
			       or Fast Open.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}
	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too, but it is
	 * obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors as ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for the reset.
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;
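	/* Per RFC 793: if the offending segment carried an ACK, the RST
	 * takes its sequence number from that ACK field; otherwise the
	 * RST keeps seq 0 and ACKs everything the segment occupied.
	 */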
	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked with the md5 hash of the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When the socket is gone, all binding information is lost and
	 * routing might fail in this case. No choice here: if we choose
	 * to force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside of socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
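	/* The timestamp option is built by hand: two NOPs pad it to a
	 * 4-byte boundary, then kind/length, then the TSval and TSecr
	 * words.
	 */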
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}
	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
	int res = tcp_v4_send_synack(sk, NULL, req, 0, false);

	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
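	/* Publish the key only after it is fully initialized, so lockless
	 * readers walking the list under rcu_read_lock() never observe a
	 * half-built entry.
	 */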
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;
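	/* The list headed by rskq_rst_head tracks Fast Open requests that
	 * ended in a RST; when the queue is full, an expired entry there
	 * may be reclaimed below to admit one more request.
	 */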
	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL */
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}

	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
						ip_hdr(skb)->daddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
			    TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
					ip_hdr(skb)->daddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as an invalid cookie and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
					ip_hdr(skb)->daddr, valid_foc);
	}
	return false;
}
static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
				    struct request_sock *req)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
	int err;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		tp->syn_data_acked = 1;
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if ((sysctl_tcp_syncookies == 2 ||
	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive. It means that we continue to
			 * communicate to destinations already remembered
			 * at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
					    ireq->rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
		goto drop_and_free;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
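		/* MD5 signing covers the TCP payload, so segmentation
		 * offload, which would re-chop segments after the hash
		 * was computed, must stay disabled on this socket.
		 */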
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);
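	/* Short segments are cheap to verify right away; for longer ones
	 * keep the seeded pseudo-header sum so the full checksum can be
	 * completed later (e.g. while copying to user space).
	 */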
	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
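/* Early demux: look up the established socket while the packet is still
 * in the IP layer and, when one is found, reuse its cached input route
 * instead of paying for a full routing lookup on every packet.
 */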
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
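/* Queueing to the prequeue defers protocol processing, including ACK
 * generation, into the reader's context; that batches work but can
 * delay ACKs, hence the shortened delayed-ACK timer armed below.
 */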
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	skb_dst_drop(skb);
	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
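	/* Three delivery paths from here: process directly under the
	 * socket spinlock, park the skb on the prequeue for the reading
	 * task, or push it to the backlog when the owner holds the sock.
	 */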
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

/*
 * Get the next listener socket following cur.  If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is zero
 * the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	/* Fast path: resume from the cached position of the previous read */
	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
		/* fall through: the listening bucket lock is still held */
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
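
/*
 * Illustrative sketch (not part of the original file): the start/next/
 * stop/show callbacks wired up above follow the generic seq_file contract:
 * start() positions the iterator (and may take locks), next() advances it,
 * stop() releases locks, and show() formats one record.  A minimal
 * iterator over a fixed array looks like this; all names are hypothetical.
 */
#if 0
static int demo_vals[] = { 1, 2, 3 };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return *pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return *pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* release any locks taken in demo_start() */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};
#endif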
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
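
/*
 * Illustrative sketch (not part of the original file): a caller would
 * register its own /proc/net view by filling in a tcp_seq_afinfo and
 * calling tcp_proc_register() from a pernet init hook, just as the IPv4
 * instance further below does.  "tcpfoo" and the reuse of the v4 show
 * routine and fops are made-up assumptions for illustration only.
 */
#if 0
static struct tcp_seq_afinfo tcpfoo_seq_afinfo = {
	.name		= "tcpfoo",
	.family		= AF_INET,		/* assumption */
	.seq_fops	= &tcp_afinfo_seq_fops,	/* assumption: reuse v4 fops */
	.seq_ops	= {
		.show	= tcp4_seq_show,	/* assumption: reuse v4 show */
	},
};

static int __net_init tcpfoo_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcpfoo_seq_afinfo);
}

static void __net_exit tcpfoo_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcpfoo_seq_afinfo);
}
#endif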
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
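
/*
 * Illustrative sketch (not part of the original file): a userspace reader
 * can parse the fixed-width rows emitted above.  Addresses and ports are
 * hex-encoded, with addresses in network byte order.  This hypothetical
 * example is ordinary userspace C, shown here only for clarity.
 */
#if 0
#include <stdio.h>

static void parse_proc_net_tcp(FILE *f)
{
	char line[256];

	fgets(line, sizeof(line), f);	/* skip the header row */
	while (fgets(line, sizeof(line), f)) {
		unsigned int sl, local_ip, local_port, rem_ip, rem_port, state;

		/* matches the "%4d: %08X:%04X %08X:%04X %02X ..." rows */
		if (sscanf(line, "%u: %8X:%4X %8X:%4X %2X",
			   &sl, &local_ip, &local_port,
			   &rem_ip, &rem_port, &state) == 6)
			printf("sl=%u state=%02X local_port=%u\n",
			       sl, state, local_port);
	}
}
#endif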
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
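
/*
 * Illustrative sketch (not part of the original file): the af-independent
 * socket layer does not call the functions above directly; it dispatches
 * through sk->sk_prot, so one call site serves every transport protocol.
 * The wrapper below is a hypothetical caller, shown only for clarity.
 */
#if 0
static int proto_dispatch_example(struct sock *sk, struct sockaddr *uaddr,
				  int addr_len)
{
	/* For an IPv4 TCP socket, sk->sk_prot == &tcp_prot, so this call
	 * ends up in tcp_v4_connect(). */
	return sk->sk_prot->connect(sk, uaddr, addr_len);
}
#endif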
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}