/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Changes:
 *	David S. Miller	:	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller :	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				request_sock handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *	Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *	Andi Kleen :		Fix new listen.
 *	Andi Kleen :		Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
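
/* Note on the sequence choice in tcp_twsk_unique() above: the reused
 * connection starts 65535 + 2 past the old TIME_WAIT socket's snd_nxt,
 * i.e. beyond one maximum (unscaled) window plus the SYN and FIN sequence
 * slots, so no segment from the previous incarnation can land inside the
 * new connection's sequence space.
 */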

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
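
/* tcp_v4_connect() above is reached from connect(2) via
 * inet_stream_connect() through tcp_prot.connect (see the proto table at
 * the end of this file); the caller holds the socket lock throughout.
 */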

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case:
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
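
/* For CHECKSUM_PARTIAL skbs above, only the folded pseudo-header sum is
 * written into th->check; csum_start/csum_offset then tell the NIC (or
 * skb_checksum_help() on the software fallback path) where to fold in the
 * checksum over the TCP header and payload.
 */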

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}
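
	/* Sequence choice per RFC 793: a reset answering a segment that
	 * carried an ACK borrows that acknowledgment number as its own
	 * sequence number; otherwise it acks everything the segment occupied
	 * (payload plus one sequence slot each for SYN and FIN) so the peer
	 * will accept it.
	 */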
	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if we can't find the key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
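
	/* With MD5 in play the reset grows by TCPOLEN_MD5SIG_ALIGNED (20)
	 * option bytes: two NOPs for alignment, the kind/length bytes
	 * (19, 18) and the 16-byte digest -- roughly 01 01 13 12 <digest>
	 * on the wire.
	 */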
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * Routing might fail in this case. No choice here, if we choose to
	 * force the input interface, we will misroute in case of asymmetric
	 * route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
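
/* When tcp_syn_flood_action() opts for cookies, no request_sock state is
 * retained: the listener encodes the connection parameters into the
 * SYN-ACK's initial sequence number, and cookie_v4_check() (see
 * tcp_v4_hnd_req() below) reconstructs the request from the returning ACK.
 */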

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
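
/* tcp_md5_do_add() and tcp_md5_do_del() are driven from the TCP_MD5SIG
 * setsockopt via tcp_v4_parse_md5_keys() below, and from
 * tcp_v4_syn_recv_sock(), which copies the listener's key onto the newly
 * created child socket.
 */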

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
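
/* The tcp4_pseudohdr hashed above mirrors the checksum pseudo-header:
 * 4-byte source address, 4-byte destination address, one zero pad byte,
 * the protocol number (6 for TCP) and the 16-bit segment length.
 */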

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
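
/* RFC 2385 ordering, as implemented by the tcp_md5_hash_* helpers in
 * tcp.c: the pseudo-header first, then the 20-byte TCP header (options
 * excluded, checksum field zeroed), then the segment payload, and finally
 * the key itself.
 */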

static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
				      const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	bool ret;

	rcu_read_lock();
	ret = __tcp_v4_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}
#endif

static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);
	newinet->inet_daddr = ireq->ir_rmt_addr;
	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr = ireq->ir_loc_addr;
	inet_opt = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt = NULL;
	newinet->mc_index = inet_iif(skb);
	newinet->mc_ttl = ip_hdr(skb)->ttl;
	newinet->rcv_tos = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	inet_set_txhash(newsk);
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case: see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
				      th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &TCP_SKB_CB(skb)->header.h4.opt);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
						POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
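
/* A true return above means the skb now sits on the prequeue (or was
 * force-fed through sk_backlog_rcv() on overflow) and the caller must not
 * process it; false tells tcp_v4_rcv() to run tcp_v4_do_rcv() directly.
 */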

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky: we move IPCB to its correct location into
	 * TCP_SKB_CB(); barrier() makes sure the compiler won't play
	 * fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
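	/* SYN and FIN each consume one unit of sequence space, which is why
	 * end_seq adds th->syn and th->fin on top of the payload length
	 * (skb->len minus the header).
	 */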
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we don't lock the socket, we might find a transient
		 * negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}