Lines matching "non-urgent" in net/ipv4/tcp.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
25 * sk->inuse=1 and was trying to connect
51 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Fixed assorted sk->rqueue->next errors
63 * Alan Cox : FIN with no memory -> CRASH
76 * window non shrink trick.
113 * Alan Cox : Changed the semantics of sk->socket to
164 * Matt Day : poll()->select() match BSD precisely on error
166 * Marc Tamsky : Various sk->prot->retransmits and
167 * sk->retransmits misupdating fixed.
172 * Alan Cox : Look up device on a retransmit - routes may
214 * waiting for final ack in three-way handshake.
332 * Technical note: it is used by multiple contexts non atomically.
348 val--; in tcp_enter_memory_pressure()
363 jiffies_to_msecs(jiffies - val)); in tcp_leave_memory_pressure()
394 while (--retrans) { in retrans_to_secs()
406 u32 rate = READ_ONCE(tp->rate_delivered); in tcp_compute_delivery_rate()
407 u32 intv = READ_ONCE(tp->rate_interval_us); in tcp_compute_delivery_rate()
411 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; in tcp_compute_delivery_rate()
417 /* Address-family independent initialization for a tcp_sock.
428 tp->out_of_order_queue = RB_ROOT; in tcp_init_sock()
429 sk->tcp_rtx_queue = RB_ROOT; in tcp_init_sock()
431 INIT_LIST_HEAD(&tp->tsq_node); in tcp_init_sock()
432 INIT_LIST_HEAD(&tp->tsorted_sent_queue); in tcp_init_sock()
434 icsk->icsk_rto = TCP_TIMEOUT_INIT; in tcp_init_sock()
435 rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us); in tcp_init_sock()
436 icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us); in tcp_init_sock()
437 icsk->icsk_delack_max = TCP_DELACK_MAX; in tcp_init_sock()
438 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); in tcp_init_sock()
439 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); in tcp_init_sock()
442 * initial SYN frame in their delayed-ACK and congestion control in tcp_init_sock()
444 * efficiently to them. -DaveM in tcp_init_sock()
449 tp->app_limited = ~0U; in tcp_init_sock()
450 tp->rate_app_limited = 1; in tcp_init_sock()
452 /* See draft-stevens-tcpca-spec-01 for discussion of the in tcp_init_sock()
455 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; in tcp_init_sock()
456 tp->snd_cwnd_clamp = ~0; in tcp_init_sock()
457 tp->mss_cache = TCP_MSS_DEFAULT; in tcp_init_sock()
459 tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering); in tcp_init_sock()
462 tp->tsoffset = 0; in tcp_init_sock()
463 tp->rack.reo_wnd_steps = 1; in tcp_init_sock()
465 sk->sk_write_space = sk_stream_write_space; in tcp_init_sock()
468 icsk->icsk_sync_mss = tcp_sync_mss; in tcp_init_sock()
470 WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1])); in tcp_init_sock()
471 WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1])); in tcp_init_sock()
474 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); in tcp_init_sock()
476 xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1); in tcp_init_sock()
483 u32 tsflags = sockc->tsflags; in tcp_tx_timestamp()
489 sock_tx_timestamp(sk, sockc, &shinfo->tx_flags); in tcp_tx_timestamp()
491 tcb->txstamp_ack = 1; in tcp_tx_timestamp()
493 shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; in tcp_tx_timestamp()
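
/*
 * Editor's sketch, not part of tcp.c: minimal userspace use of the TX
 * timestamping path above. tcp_tx_timestamp() keys each timestamp to the
 * last byte of a write (tskey = seq + len - 1); with SOF_TIMESTAMPING_OPT_ID
 * that key comes back as ee_data on the socket error queue.
 */
#include <linux/errqueue.h>	/* sock_extended_err, for the readback */
#include <linux/net_tstamp.h>
#include <sys/socket.h>

static void enable_tx_timestamps(int fd)
{
	unsigned int val = SOF_TIMESTAMPING_TX_ACK |	/* stamp when data is ACKed */
			   SOF_TIMESTAMPING_SOFTWARE |	/* report software stamps */
			   SOF_TIMESTAMPING_OPT_ID;	/* tag each stamp with a byte key */

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
	/*
	 * After send(), recvmsg(fd, ..., MSG_ERRQUEUE) yields a
	 * scm_timestamping cmsg plus a sock_extended_err whose ee_data
	 * identifies the last byte of the corresponding send().
	 */
}
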
514 struct sock *sk = sock->sk; in tcp_poll()
543 * Check-me. in tcp_poll()
548 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP in tcp_poll()
554 * why EPOLLHUP is incompatible with EPOLLOUT. --ANK in tcp_poll()
557 * blocking on fresh not-connected or disconnected socket. --ANK in tcp_poll()
559 shutdown = READ_ONCE(sk->sk_shutdown); in tcp_poll()
567 (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) { in tcp_poll()
569 u16 urg_data = READ_ONCE(tp->urg_data); in tcp_poll()
572 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && in tcp_poll()
584 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_poll()
610 if (READ_ONCE(sk->sk_err) || in tcp_poll()
611 !skb_queue_empty_lockless(&sk->sk_error_queue)) in tcp_poll()
626 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
627 return -EINVAL; in tcp_ioctl()
634 answ = READ_ONCE(tp->urg_data) && in tcp_ioctl()
635 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq); in tcp_ioctl()
638 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
639 return -EINVAL; in tcp_ioctl()
641 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
644 answ = READ_ONCE(tp->write_seq) - tp->snd_una; in tcp_ioctl()
647 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
648 return -EINVAL; in tcp_ioctl()
650 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
653 answ = READ_ONCE(tp->write_seq) - in tcp_ioctl()
654 READ_ONCE(tp->snd_nxt); in tcp_ioctl()
657 return -ENOIOCTLCMD; in tcp_ioctl()
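
/*
 * Editor's sketch, not part of tcp.c: the ioctls served by tcp_ioctl()
 * above, from userspace. SIOCOUTQ reports write_seq - snd_una (sent but
 * unacked), SIOCOUTQNSD write_seq - snd_nxt (queued but unsent), and
 * SIOCATMARK the urgent-mark test. "fd" is assumed connected.
 */
#include <linux/sockios.h>
#include <sys/ioctl.h>

static void queue_snapshot(int fd)
{
	int inq, outq, notsent, atmark;

	ioctl(fd, SIOCINQ, &inq);	  /* bytes readable (aka FIONREAD) */
	ioctl(fd, SIOCOUTQ, &outq);	  /* bytes sent but not yet ACKed */
	ioctl(fd, SIOCOUTQNSD, &notsent); /* bytes not yet sent at all */
	ioctl(fd, SIOCATMARK, &atmark);	  /* 1 if next byte is the OOB mark */
}
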
667 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_mark_push()
668 tp->pushed_seq = tp->write_seq; in tcp_mark_push()
673 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); in forced_push()
681 tcb->seq = tcb->end_seq = tp->write_seq; in tcp_skb_entail()
682 tcb->tcp_flags = TCPHDR_ACK; in tcp_skb_entail()
685 sk_wmem_queued_add(sk, skb->truesize); in tcp_skb_entail()
686 sk_mem_charge(sk, skb->truesize); in tcp_skb_entail()
687 if (tp->nonagle & TCP_NAGLE_PUSH) in tcp_skb_entail()
688 tp->nonagle &= ~TCP_NAGLE_PUSH; in tcp_skb_entail()
696 tp->snd_up = tp->write_seq; in tcp_mark_urg()
712 return skb->len < size_goal && in tcp_should_autocork()
713 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) && in tcp_should_autocork()
715 refcount_read(&sk->sk_wmem_alloc) > skb->truesize && in tcp_should_autocork()
736 if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { in tcp_push()
738 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_push()
744 if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize) in tcp_push()
757 struct tcp_splice_state *tss = rd_desc->arg.data; in tcp_splice_data_recv()
760 ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe, in tcp_splice_data_recv()
761 min(rd_desc->count, len), tss->flags); in tcp_splice_data_recv()
763 rd_desc->count -= ret; in tcp_splice_data_recv()
772 .count = tss->len, in __tcp_splice_read()
779 * tcp_splice_read - splice data from TCP socket to a pipe
794 struct sock *sk = sock->sk; in tcp_splice_read()
809 return -ESPIPE; in tcp_splice_read()
815 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); in tcp_splice_read()
825 if (sk->sk_err) { in tcp_splice_read()
829 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_splice_read()
831 if (sk->sk_state == TCP_CLOSE) { in tcp_splice_read()
836 ret = -ENOTCONN; in tcp_splice_read()
840 ret = -EAGAIN; in tcp_splice_read()
847 if (!skb_queue_empty(&sk->sk_receive_queue)) in tcp_splice_read()
858 tss.len -= ret; in tcp_splice_read()
866 if (sk->sk_err || sk->sk_state == TCP_CLOSE || in tcp_splice_read()
867 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_splice_read()
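
/*
 * Editor's sketch, not part of tcp.c: driving tcp_splice_read() from
 * userspace by splicing socket payload into a pipe without copying it
 * through user memory. Error handling is trimmed; "fd" is assumed to be
 * a connected TCP socket.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t drain_to_pipe(int fd)
{
	int p[2];
	ssize_t n;

	if (pipe(p) < 0)
		return -1;
	/* a socket source makes splice() end up in tcp_splice_read() */
	n = splice(fd, NULL, p[1], NULL, 65536, SPLICE_F_MOVE);
	close(p[0]);
	close(p[1]);
	return n;
}
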
890 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in tcp_stream_alloc_skb()
893 sk_forced_mem_schedule(sk, skb->truesize); in tcp_stream_alloc_skb()
895 mem_scheduled = sk_wmem_schedule(sk, skb->truesize); in tcp_stream_alloc_skb()
899 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_stream_alloc_skb()
900 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); in tcp_stream_alloc_skb()
905 sk->sk_prot->enter_memory_pressure(sk); in tcp_stream_alloc_skb()
921 new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size); in tcp_xmit_size_goal()
924 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
927 tp->gso_segs = min_t(u16, new_size_goal / mss_now, in tcp_xmit_size_goal()
928 sk->sk_gso_max_segs); in tcp_xmit_size_goal()
929 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
955 if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { in tcp_remove_empty_skb()
967 u32 extra = skb->truesize - in tcp_downgrade_zcopy_pure()
971 return -ENOMEM; in tcp_downgrade_zcopy_pure()
974 skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY; in tcp_downgrade_zcopy_pure()
988 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0] in tcp_wmem_schedule()
991 left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued; in tcp_wmem_schedule()
994 return min(copy, sk->sk_forward_alloc); in tcp_wmem_schedule()
999 if (tp->fastopen_req) { in tcp_free_fastopen_req()
1000 kfree(tp->fastopen_req); in tcp_free_fastopen_req()
1001 tp->fastopen_req = NULL; in tcp_free_fastopen_req()
1010 struct sockaddr *uaddr = msg->msg_name; in tcp_sendmsg_fastopen()
1013 if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & in tcp_sendmsg_fastopen()
1015 (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && in tcp_sendmsg_fastopen()
1016 uaddr->sa_family == AF_UNSPEC)) in tcp_sendmsg_fastopen()
1017 return -EOPNOTSUPP; in tcp_sendmsg_fastopen()
1018 if (tp->fastopen_req) in tcp_sendmsg_fastopen()
1019 return -EALREADY; /* Another Fast Open is in progress */ in tcp_sendmsg_fastopen()
1021 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), in tcp_sendmsg_fastopen()
1022 sk->sk_allocation); in tcp_sendmsg_fastopen()
1023 if (unlikely(!tp->fastopen_req)) in tcp_sendmsg_fastopen()
1024 return -ENOBUFS; in tcp_sendmsg_fastopen()
1025 tp->fastopen_req->data = msg; in tcp_sendmsg_fastopen()
1026 tp->fastopen_req->size = size; in tcp_sendmsg_fastopen()
1027 tp->fastopen_req->uarg = uarg; in tcp_sendmsg_fastopen()
1034 inet->inet_dport = 0; in tcp_sendmsg_fastopen()
1035 sk->sk_route_caps = 0; in tcp_sendmsg_fastopen()
1038 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; in tcp_sendmsg_fastopen()
1039 err = __inet_stream_connect(sk->sk_socket, uaddr, in tcp_sendmsg_fastopen()
1040 msg->msg_namelen, flags, 1); in tcp_sendmsg_fastopen()
1044 if (tp->fastopen_req) { in tcp_sendmsg_fastopen()
1045 *copied = tp->fastopen_req->copied; in tcp_sendmsg_fastopen()
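
/*
 * Editor's sketch, not part of tcp.c: the two userspace entries into
 * tcp_sendmsg_fastopen() above. Either sendto(..., MSG_FASTOPEN) on an
 * unconnected socket, or TCP_FASTOPEN_CONNECT followed by a normal
 * connect()+send(). With a cached cookie the payload rides in the SYN;
 * without one the SYN goes out bare and data follows the handshake.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static ssize_t tfo_send(int fd, const struct sockaddr *addr, socklen_t alen,
			const void *buf, size_t len)
{
	/* on a nonblocking socket this can report EINPROGRESS with the
	 * SYN data already counted, matching the copied_syn handling above */
	return sendto(fd, buf, len, MSG_FASTOPEN, addr, alen);
}
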
1064 flags = msg->msg_flags; in tcp_sendmsg_locked()
1067 if (msg->msg_ubuf) { in tcp_sendmsg_locked()
1068 uarg = msg->msg_ubuf; in tcp_sendmsg_locked()
1069 if (sk->sk_route_caps & NETIF_F_SG) in tcp_sendmsg_locked()
1075 err = -ENOBUFS; in tcp_sendmsg_locked()
1078 if (sk->sk_route_caps & NETIF_F_SG) in tcp_sendmsg_locked()
1081 uarg_to_msgzc(uarg)->zerocopy = 0; in tcp_sendmsg_locked()
1083 } else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) { in tcp_sendmsg_locked()
1084 if (sk->sk_route_caps & NETIF_F_SG) in tcp_sendmsg_locked()
1090 !tp->repair) { in tcp_sendmsg_locked()
1092 if (err == -EINPROGRESS && copied_syn > 0) in tcp_sendmsg_locked()
1100 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ in tcp_sendmsg_locked()
1106 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in tcp_sendmsg_locked()
1113 if (unlikely(tp->repair)) { in tcp_sendmsg_locked()
1114 if (tp->repair_queue == TCP_RECV_QUEUE) { in tcp_sendmsg_locked()
1119 err = -EINVAL; in tcp_sendmsg_locked()
1120 if (tp->repair_queue == TCP_NO_QUEUE) in tcp_sendmsg_locked()
1127 if (msg->msg_controllen) { in tcp_sendmsg_locked()
1130 err = -EINVAL; in tcp_sendmsg_locked()
1144 err = -EPIPE; in tcp_sendmsg_locked()
1145 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in tcp_sendmsg_locked()
1153 copy = size_goal - skb->len; in tcp_sendmsg_locked()
1168 skb = tcp_stream_alloc_skb(sk, sk->sk_allocation, in tcp_sendmsg_locked()
1176 skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); in tcp_sendmsg_locked()
1185 if (tp->repair) in tcp_sendmsg_locked()
1186 TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; in tcp_sendmsg_locked()
1195 int i = skb_shinfo(skb)->nr_frags; in tcp_sendmsg_locked()
1201 if (!skb_can_coalesce(skb, i, pfrag->page, in tcp_sendmsg_locked()
1202 pfrag->offset)) { in tcp_sendmsg_locked()
1210 copy = min_t(int, copy, pfrag->size - pfrag->offset); in tcp_sendmsg_locked()
1222 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, in tcp_sendmsg_locked()
1223 pfrag->page, in tcp_sendmsg_locked()
1224 pfrag->offset, in tcp_sendmsg_locked()
1231 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); in tcp_sendmsg_locked()
1233 skb_fill_page_desc(skb, i, pfrag->page, in tcp_sendmsg_locked()
1234 pfrag->offset, copy); in tcp_sendmsg_locked()
1235 page_ref_inc(pfrag->page); in tcp_sendmsg_locked()
1237 pfrag->offset += copy; in tcp_sendmsg_locked()
1242 if (!skb->len) in tcp_sendmsg_locked()
1243 skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; in tcp_sendmsg_locked()
1252 if (err == -EMSGSIZE || err == -EEXIST) { in tcp_sendmsg_locked()
1267 err = skb_splice_from_iter(skb, &msg->msg_iter, copy, in tcp_sendmsg_locked()
1268 sk->sk_allocation); in tcp_sendmsg_locked()
1270 if (err == -EMSGSIZE) { in tcp_sendmsg_locked()
1279 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; in tcp_sendmsg_locked()
1286 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; in tcp_sendmsg_locked()
1288 WRITE_ONCE(tp->write_seq, tp->write_seq + copy); in tcp_sendmsg_locked()
1289 TCP_SKB_CB(skb)->end_seq += copy; in tcp_sendmsg_locked()
1295 TCP_SKB_CB(skb)->eor = 1; in tcp_sendmsg_locked()
1299 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) in tcp_sendmsg_locked()
1310 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_sendmsg_locked()
1326 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in tcp_sendmsg_locked()
1329 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ in tcp_sendmsg_locked()
1330 if (uarg && !msg->msg_ubuf) in tcp_sendmsg_locked()
1340 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ in tcp_sendmsg_locked()
1341 if (uarg && !msg->msg_ubuf) in tcp_sendmsg_locked()
1345 if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { in tcp_sendmsg_locked()
1346 sk->sk_write_space(sk); in tcp_sendmsg_locked()
1367 struct sock *sk = sock->sk; in tcp_splice_eof()
1376 tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); in tcp_splice_eof()
1382 * Handle reading urgent data. BSD has very simple semantics for
1391 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || in tcp_recv_urg()
1392 tp->urg_data == TCP_URG_READ) in tcp_recv_urg()
1393 return -EINVAL; /* Yes this is right ! */ in tcp_recv_urg()
1395 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) in tcp_recv_urg()
1396 return -ENOTCONN; in tcp_recv_urg()
1398 if (tp->urg_data & TCP_URG_VALID) { in tcp_recv_urg()
1400 char c = tp->urg_data; in tcp_recv_urg()
1403 WRITE_ONCE(tp->urg_data, TCP_URG_READ); in tcp_recv_urg()
1405 /* Read urgent data. */ in tcp_recv_urg()
1406 msg->msg_flags |= MSG_OOB; in tcp_recv_urg()
1413 msg->msg_flags |= MSG_TRUNC; in tcp_recv_urg()
1415 return err ? -EFAULT : len; in tcp_recv_urg()
1418 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) in tcp_recv_urg()
1425 * Mike <pall@rz.uni-karlsruhe.de> in tcp_recv_urg()
1427 return -EAGAIN; in tcp_recv_urg()
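
/*
 * Editor's sketch, not part of tcp.c: exercising the urgent-data path
 * above. The sender marks its last byte urgent with MSG_OOB; the receiver
 * reads it out of band via tcp_recv_urg() (or inline, if SO_OOBINLINE is
 * set, in which case MSG_OOB reads fail with EINVAL as the code shows).
 * "snd" and "rcv" are assumed to be the two ends of one connection.
 */
#include <sys/socket.h>

static void oob_roundtrip(int snd, int rcv)
{
	char c;

	send(snd, "!", 1, MSG_OOB);	/* the '!' becomes the urgent byte */
	recv(rcv, &c, 1, MSG_OOB);	/* out-of-band read of that byte */
}
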
1435 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { in tcp_peek_sndq()
1436 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); in tcp_peek_sndq()
1439 copied += skb->len; in tcp_peek_sndq()
1442 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_peek_sndq()
1443 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); in tcp_peek_sndq()
1447 copied += skb->len; in tcp_peek_sndq()
1467 if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ in __tcp_cleanup_rbuf()
1468 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || in __tcp_cleanup_rbuf()
1476 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || in __tcp_cleanup_rbuf()
1477 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && in __tcp_cleanup_rbuf()
1479 !atomic_read(&sk->sk_rmem_alloc))) in __tcp_cleanup_rbuf()
1483 /* We send an ACK if we can now advertise a non-zero window in __tcp_cleanup_rbuf()
1489 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { in __tcp_cleanup_rbuf()
1493 if (2*rcv_window_now <= tp->window_clamp) { in __tcp_cleanup_rbuf()
1511 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_cleanup_rbuf()
1514 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), in tcp_cleanup_rbuf()
1516 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); in tcp_cleanup_rbuf()
1522 __skb_unlink(skb, &sk->sk_receive_queue); in tcp_eat_recv_skb()
1523 if (likely(skb->destructor == sock_rfree)) { in tcp_eat_recv_skb()
1525 skb->destructor = NULL; in tcp_eat_recv_skb()
1526 skb->sk = NULL; in tcp_eat_recv_skb()
1537 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { in tcp_recv_skb()
1538 offset = seq - TCP_SKB_CB(skb)->seq; in tcp_recv_skb()
1539 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_recv_skb()
1541 offset--; in tcp_recv_skb()
1543 if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { in tcp_recv_skb()
1562 * - It is assumed that the socket was locked by the caller.
1563 * - The routine does not block.
1564 * - At present, there is no support for reading OOB data
1578 if (sk->sk_state == TCP_LISTEN) in __tcp_read_sock()
1579 return -ENOTCONN; in __tcp_read_sock()
1581 if (offset < skb->len) { in __tcp_read_sock()
1585 len = skb->len - offset; in __tcp_read_sock()
1586 /* Stop reading if we hit a patch of urgent data */ in __tcp_read_sock()
1587 if (unlikely(tp->urg_data)) { in __tcp_read_sock()
1588 u32 urg_offset = tp->urg_seq - seq; in __tcp_read_sock()
1611 skb = tcp_recv_skb(sk, seq - 1, &offset); in __tcp_read_sock()
1617 if (offset + 1 != skb->len) in __tcp_read_sock()
1620 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { in __tcp_read_sock()
1626 if (!desc->count) in __tcp_read_sock()
1650 &tcp_sk(sk)->copied_seq); in tcp_read_sock()
1666 if (sk->sk_state == TCP_LISTEN) in tcp_read_skb()
1667 return -ENOTCONN; in tcp_read_skb()
1669 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { in tcp_read_skb()
1673 __skb_unlink(skb, &sk->sk_receive_queue); in tcp_read_skb()
1675 tcp_flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_read_skb()
1694 u32 seq = tp->copied_seq; in tcp_read_done()
1699 if (sk->sk_state == TCP_LISTEN) in tcp_read_done()
1706 used = min_t(size_t, skb->len - offset, left); in tcp_read_done()
1708 left -= used; in tcp_read_done()
1710 if (skb->len > offset + used) in tcp_read_done()
1713 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { in tcp_read_done()
1720 WRITE_ONCE(tp->copied_seq, seq); in tcp_read_done()
1726 tcp_cleanup_rbuf(sk, len - left); in tcp_read_done()
1732 return tcp_inq(sock->sk); in tcp_peek_len()
1741 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) in tcp_set_rcvlowat()
1742 cap = sk->sk_rcvbuf >> 1; in tcp_set_rcvlowat()
1744 cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; in tcp_set_rcvlowat()
1746 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); in tcp_set_rcvlowat()
1751 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) in tcp_set_rcvlowat()
1755 if (space > sk->sk_rcvbuf) { in tcp_set_rcvlowat()
1756 WRITE_ONCE(sk->sk_rcvbuf, space); in tcp_set_rcvlowat()
1757 WRITE_ONCE(tcp_sk(sk)->window_clamp, val); in tcp_set_rcvlowat()
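
/*
 * Editor's sketch, not part of tcp.c: tcp_set_rcvlowat() above backs the
 * plain SO_RCVLOWAT socket option. Raising it makes poll() and blocking
 * recv() wait for that many queued bytes, and may grow sk_rcvbuf and the
 * window clamp exactly as the code shows.
 */
#include <sys/socket.h>

static int set_low_watermark(int fd, int val)
{
	/* val is capped at half the receive buffer limit, per the code above */
	return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &val, sizeof(val));
}
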
1766 if (skb->tstamp) in tcp_update_recv_tstamps()
1767 tss->ts[0] = ktime_to_timespec64(skb->tstamp); in tcp_update_recv_tstamps()
1769 tss->ts[0] = (struct timespec64) {0}; in tcp_update_recv_tstamps()
1771 if (skb_hwtstamps(skb)->hwtstamp) in tcp_update_recv_tstamps()
1772 tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); in tcp_update_recv_tstamps()
1774 tss->ts[2] = (struct timespec64) {0}; in tcp_update_recv_tstamps()
1784 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) in tcp_mmap()
1785 return -EPERM; in tcp_mmap()
1791 vma->vm_ops = &tcp_vm_ops; in tcp_mmap()
1801 if (unlikely(offset_skb >= skb->len)) in skb_advance_to_frag()
1804 offset_skb -= skb_headlen(skb); in skb_advance_to_frag()
1808 frag = skb_shinfo(skb)->frags; in skb_advance_to_frag()
1814 offset_skb -= skb_frag_size(frag); in skb_advance_to_frag()
1830 if (PageCompound(page) || page->mapping) in can_map_frag()
1860 zc->recv_skip_hint = skb->len - offset; in tcp_zerocopy_set_hint_for_skb()
1871 if (frag == &info->frags[info->nr_frags - 1]) in tcp_zerocopy_set_hint_for_skb()
1875 partial_frag_remainder = skb_frag_size(frag) - frag_offset; in tcp_zerocopy_set_hint_for_skb()
1876 zc->recv_skip_hint -= partial_frag_remainder; in tcp_zerocopy_set_hint_for_skb()
1884 mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); in tcp_zerocopy_set_hint_for_skb()
1885 zc->recv_skip_hint = mappable_offset + partial_frag_remainder; in tcp_zerocopy_set_hint_for_skb()
1895 unsigned long copy_address = (unsigned long)zc->copybuf_address; in receive_fallback_to_copy()
1899 zc->length = 0; in receive_fallback_to_copy()
1900 zc->recv_skip_hint = 0; in receive_fallback_to_copy()
1902 if (copy_address != zc->copybuf_address) in receive_fallback_to_copy()
1903 return -EINVAL; in receive_fallback_to_copy()
1911 tss, &zc->msg_flags); in receive_fallback_to_copy()
1915 zc->copybuf_len = err; in receive_fallback_to_copy()
1916 if (likely(zc->copybuf_len)) { in receive_fallback_to_copy()
1920 skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); in receive_fallback_to_copy()
1931 unsigned long copy_address = (unsigned long)zc->copybuf_address; in tcp_copy_straggler_data()
1935 if (copy_address != zc->copybuf_address) in tcp_copy_straggler_data()
1936 return -EINVAL; in tcp_copy_straggler_data()
1945 zc->recv_skip_hint -= copylen; in tcp_copy_straggler_data()
1958 u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); in tcp_zc_handle_leftover()
1964 offset = *seq - TCP_SKB_CB(skb)->seq; in tcp_zc_handle_leftover()
1967 if (TCP_SKB_CB(skb)->has_rxtstamp) { in tcp_zc_handle_leftover()
1969 zc->msg_flags |= TCP_CMSG_TS; in tcp_zc_handle_leftover()
1973 zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, in tcp_zc_handle_leftover()
1975 return zc->copybuf_len < 0 ? 0 : copylen; in tcp_zc_handle_leftover()
1989 if (err == -EBUSY && in tcp_zerocopy_vm_insert_batch_error()
1990 zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { in tcp_zerocopy_vm_insert_batch_error()
1993 maybe_zap_len = total_bytes_to_map - /* All bytes to map */ in tcp_zerocopy_vm_insert_batch_error()
2008 bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); in tcp_zerocopy_vm_insert_batch_error()
2020 *length -= bytes_not_mapped; in tcp_zerocopy_vm_insert_batch_error()
2021 zc->recv_skip_hint += bytes_not_mapped; in tcp_zerocopy_vm_insert_batch_error()
2041 pages_mapped = pages_to_map - (unsigned int)pages_remaining; in tcp_zerocopy_vm_insert_batch()
2066 msg_control_addr = (unsigned long)zc->msg_control; in tcp_zc_finalize_rx_tstamp()
2069 (__kernel_size_t)zc->msg_controllen; in tcp_zc_finalize_rx_tstamp()
2073 zc->msg_flags = 0; in tcp_zc_finalize_rx_tstamp()
2074 if (zc->msg_control == msg_control_addr && in tcp_zc_finalize_rx_tstamp()
2075 zc->msg_controllen == cmsg_dummy.msg_controllen) { in tcp_zc_finalize_rx_tstamp()
2077 zc->msg_control = (__u64) in tcp_zc_finalize_rx_tstamp()
2079 zc->msg_controllen = in tcp_zc_finalize_rx_tstamp()
2081 zc->msg_flags = (__u32)cmsg_dummy.msg_flags; in tcp_zc_finalize_rx_tstamp()
2092 if (vma->vm_ops != &tcp_vm_ops) { in find_tcp_vma()
2102 if (!vma || vma->vm_ops != &tcp_vm_ops) { in find_tcp_vma()
2116 unsigned long address = (unsigned long)zc->address; in tcp_zerocopy_receive()
2118 s32 copybuf_len = zc->copybuf_len; in tcp_zerocopy_receive()
2124 u32 seq = tp->copied_seq; in tcp_zerocopy_receive()
2130 zc->copybuf_len = 0; in tcp_zerocopy_receive()
2131 zc->msg_flags = 0; in tcp_zerocopy_receive()
2133 if (address & (PAGE_SIZE - 1) || address != zc->address) in tcp_zerocopy_receive()
2134 return -EINVAL; in tcp_zerocopy_receive()
2136 if (sk->sk_state == TCP_LISTEN) in tcp_zerocopy_receive()
2137 return -ENOTCONN; in tcp_zerocopy_receive()
2145 zc->length = 0; in tcp_zerocopy_receive()
2146 zc->recv_skip_hint = inq; in tcp_zerocopy_receive()
2148 return -EIO; in tcp_zerocopy_receive()
2152 vma = find_tcp_vma(current->mm, address, &mmap_locked); in tcp_zerocopy_receive()
2154 return -EINVAL; in tcp_zerocopy_receive()
2156 vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); in tcp_zerocopy_receive()
2158 total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); in tcp_zerocopy_receive()
2160 if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) in tcp_zerocopy_receive()
2163 zc->length = total_bytes_to_map; in tcp_zerocopy_receive()
2164 zc->recv_skip_hint = 0; in tcp_zerocopy_receive()
2166 zc->length = avail_len; in tcp_zerocopy_receive()
2167 zc->recv_skip_hint = avail_len; in tcp_zerocopy_receive()
2170 while (length + PAGE_SIZE <= zc->length) { in tcp_zerocopy_receive()
2174 if (zc->recv_skip_hint < PAGE_SIZE) { in tcp_zerocopy_receive()
2178 if (zc->recv_skip_hint > 0) in tcp_zerocopy_receive()
2180 skb = skb->next; in tcp_zerocopy_receive()
2181 offset = seq - TCP_SKB_CB(skb)->seq; in tcp_zerocopy_receive()
2189 if (TCP_SKB_CB(skb)->has_rxtstamp) { in tcp_zerocopy_receive()
2191 zc->msg_flags |= TCP_CMSG_TS; in tcp_zerocopy_receive()
2193 zc->recv_skip_hint = skb->len - offset; in tcp_zerocopy_receive()
2200 zc->recv_skip_hint); in tcp_zerocopy_receive()
2202 zc->recv_skip_hint = mappable_offset; in tcp_zerocopy_receive()
2212 zc->recv_skip_hint -= PAGE_SIZE; in tcp_zerocopy_receive()
2215 zc->recv_skip_hint < PAGE_SIZE) { in tcp_zerocopy_receive()
2236 mmap_read_unlock(current->mm); in tcp_zerocopy_receive()
2244 WRITE_ONCE(tp->copied_seq, seq); in tcp_zerocopy_receive()
2251 if (length == zc->length) in tcp_zerocopy_receive()
2252 zc->recv_skip_hint = 0; in tcp_zerocopy_receive()
2254 if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) in tcp_zerocopy_receive()
2255 ret = -EIO; in tcp_zerocopy_receive()
2257 zc->length = length; in tcp_zerocopy_receive()
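
/*
 * Editor's sketch, not part of tcp.c: the receive-zerocopy flow served by
 * tcp_zerocopy_receive() above, modeled on the kernel selftests. Userspace
 * mmap()s the socket read-only (tcp_mmap() rejects VM_WRITE/VM_EXEC) and
 * getsockopt(TCP_ZEROCOPY_RECEIVE) maps payload pages into that region;
 * the copybuf catches any unaligned remainder. Error checks are trimmed.
 */
#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>

static void zc_read(int fd, size_t map_len)
{
	char copybuf[4096];
	void *addr = mmap(NULL, map_len, PROT_READ, MAP_SHARED, fd, 0);
	struct tcp_zerocopy_receive zc;
	socklen_t zc_len = sizeof(zc);

	memset(&zc, 0, sizeof(zc));
	zc.address = (__u64)(unsigned long)addr; /* page-aligned, per the EINVAL check */
	zc.length = map_len;			 /* rounded down to whole pages */
	zc.copybuf_address = (__u64)(unsigned long)copybuf;
	zc.copybuf_len = sizeof(copybuf);
	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
	/* zc.length bytes are now mapped at addr, zc.copybuf_len were copied,
	 * and zc.recv_skip_hint says how much to recv() the ordinary way */
}
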
2267 u32 tsflags = READ_ONCE(sk->sk_tsflags); in tcp_recv_timestamp()
2270 if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { in tcp_recv_timestamp()
2275 .tv_sec = tss->ts[0].tv_sec, in tcp_recv_timestamp()
2276 .tv_nsec = tss->ts[0].tv_nsec, in tcp_recv_timestamp()
2282 .tv_sec = tss->ts[0].tv_sec, in tcp_recv_timestamp()
2283 .tv_nsec = tss->ts[0].tv_nsec, in tcp_recv_timestamp()
2291 .tv_sec = tss->ts[0].tv_sec, in tcp_recv_timestamp()
2292 .tv_usec = tss->ts[0].tv_nsec / 1000, in tcp_recv_timestamp()
2298 .tv_sec = tss->ts[0].tv_sec, in tcp_recv_timestamp()
2299 .tv_usec = tss->ts[0].tv_nsec / 1000, in tcp_recv_timestamp()
2312 tss->ts[0] = (struct timespec64) {0}; in tcp_recv_timestamp()
2315 if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { in tcp_recv_timestamp()
2321 tss->ts[2] = (struct timespec64) {0}; in tcp_recv_timestamp()
2325 tss->ts[1] = (struct timespec64) {0}; in tcp_recv_timestamp()
2336 u32 copied_seq = READ_ONCE(tp->copied_seq); in tcp_inq_hint()
2337 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); in tcp_inq_hint()
2340 inq = rcv_nxt - copied_seq; in tcp_inq_hint()
2341 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { in tcp_inq_hint()
2343 inq = tp->rcv_nxt - tp->copied_seq; in tcp_inq_hint()
2346 /* After receiving a FIN, tell the user-space to continue reading in tcp_inq_hint()
2347 * by returning a non-zero inq. in tcp_inq_hint()
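
/*
 * Editor's sketch, not part of tcp.c: consuming tcp_inq_hint() from
 * userspace. With TCP_INQ enabled, recvmsg() carries a TCP_CM_INQ control
 * message (TCP_CM_INQ == TCP_INQ) holding the bytes still queued, so the
 * next read can be sized without a separate SIOCINQ ioctl.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int recv_with_inq(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	int one = 1, inq = -1;

	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
	recvmsg(fd, &msg, 0);
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_TCP && cm->cmsg_type == TCP_CM_INQ)
			memcpy(&inq, CMSG_DATA(cm), sizeof(inq));
	return inq;	/* remaining bytes, or -1 if no cmsg arrived */
}
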
2367 for (i = 0; i < p->idx; i++) in tcp_xa_pool_commit_locked()
2368 __xa_cmpxchg(&sk->sk_user_frags, p->tokens[i], XA_ZERO_ENTRY, in tcp_xa_pool_commit_locked()
2369 (__force void *)p->netmems[i], GFP_KERNEL); in tcp_xa_pool_commit_locked()
2370 /* Rollback what has been pre-allocated and is no longer needed. */ in tcp_xa_pool_commit_locked()
2371 for (; i < p->max; i++) in tcp_xa_pool_commit_locked()
2372 __xa_erase(&sk->sk_user_frags, p->tokens[i]); in tcp_xa_pool_commit_locked()
2374 p->max = 0; in tcp_xa_pool_commit_locked()
2375 p->idx = 0; in tcp_xa_pool_commit_locked()
2380 if (!p->max) in tcp_xa_pool_commit()
2383 xa_lock_bh(&sk->sk_user_frags); in tcp_xa_pool_commit()
2387 xa_unlock_bh(&sk->sk_user_frags); in tcp_xa_pool_commit()
2395 if (p->idx < p->max) in tcp_xa_pool_refill()
2398 xa_lock_bh(&sk->sk_user_frags); in tcp_xa_pool_refill()
2403 err = __xa_alloc(&sk->sk_user_frags, &p->tokens[k], in tcp_xa_pool_refill()
2409 xa_unlock_bh(&sk->sk_user_frags); in tcp_xa_pool_refill()
2411 p->max = k; in tcp_xa_pool_refill()
2412 p->idx = 0; in tcp_xa_pool_refill()
2416 /* On error, returns the -errno. On success, returns number of bytes sent to the
2436 err = -ENODEV; in tcp_recvmsg_dmabuf()
2441 copy = start - offset; in tcp_recvmsg_dmabuf()
2445 n = copy_to_iter(skb->data + offset, copy, in tcp_recvmsg_dmabuf()
2446 &msg->msg_iter); in tcp_recvmsg_dmabuf()
2448 err = -EFAULT; in tcp_recvmsg_dmabuf()
2453 remaining_len -= copy; in tcp_recvmsg_dmabuf()
2476 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in tcp_recvmsg_dmabuf()
2477 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in tcp_recvmsg_dmabuf()
2490 net_err_ratelimited("Found non-dmabuf skb with net_iov"); in tcp_recvmsg_dmabuf()
2491 err = -ENODEV; in tcp_recvmsg_dmabuf()
2497 copy = end - offset; in tcp_recvmsg_dmabuf()
2503 skb_frag_off(frag) + offset - in tcp_recvmsg_dmabuf()
2508 skb_shinfo(skb)->nr_frags - i); in tcp_recvmsg_dmabuf()
2517 remaining_len -= copy; in tcp_recvmsg_dmabuf()
2526 atomic_long_inc(&niov->pp_ref_count); in tcp_recvmsg_dmabuf()
2544 skb = skb_shinfo(skb)->frag_list ?: skb->next; in tcp_recvmsg_dmabuf()
2546 offset = offset - start; in tcp_recvmsg_dmabuf()
2550 err = -EFAULT; in tcp_recvmsg_dmabuf()
2566 * tricks with *seq access order and skb->users are not required.
2575 int last_copied_dmabuf = -1; /* uninitialized */ in tcp_recvmsg_locked()
2587 err = -ENOTCONN; in tcp_recvmsg_locked()
2588 if (sk->sk_state == TCP_LISTEN) in tcp_recvmsg_locked()
2591 if (tp->recvmsg_inq) { in tcp_recvmsg_locked()
2593 msg->msg_get_inq = 1; in tcp_recvmsg_locked()
2597 /* Urgent data needs to be handled specially. */ in tcp_recvmsg_locked()
2601 if (unlikely(tp->repair)) { in tcp_recvmsg_locked()
2602 err = -EPERM; in tcp_recvmsg_locked()
2606 if (tp->repair_queue == TCP_SEND_QUEUE) in tcp_recvmsg_locked()
2609 err = -EINVAL; in tcp_recvmsg_locked()
2610 if (tp->repair_queue == TCP_NO_QUEUE) in tcp_recvmsg_locked()
2613 /* 'common' recv queue MSG_PEEK-ing */ in tcp_recvmsg_locked()
2616 seq = &tp->copied_seq; in tcp_recvmsg_locked()
2619 peek_seq = tp->copied_seq + peek_offset; in tcp_recvmsg_locked()
2628 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ in tcp_recvmsg_locked()
2629 if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { in tcp_recvmsg_locked()
2633 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; in tcp_recvmsg_locked()
2640 last = skb_peek_tail(&sk->sk_receive_queue); in tcp_recvmsg_locked()
2641 skb_queue_walk(&sk->sk_receive_queue, skb) { in tcp_recvmsg_locked()
2646 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), in tcp_recvmsg_locked()
2648 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, in tcp_recvmsg_locked()
2652 offset = *seq - TCP_SKB_CB(skb)->seq; in tcp_recvmsg_locked()
2653 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_recvmsg_locked()
2655 offset--; in tcp_recvmsg_locked()
2657 if (offset < skb->len) in tcp_recvmsg_locked()
2659 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_recvmsg_locked()
2663 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); in tcp_recvmsg_locked()
2668 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) in tcp_recvmsg_locked()
2673 sk->sk_err || in tcp_recvmsg_locked()
2674 sk->sk_state == TCP_CLOSE || in tcp_recvmsg_locked()
2675 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_recvmsg_locked()
2682 if (sk->sk_err) { in tcp_recvmsg_locked()
2687 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_recvmsg_locked()
2690 if (sk->sk_state == TCP_CLOSE) { in tcp_recvmsg_locked()
2694 copied = -ENOTCONN; in tcp_recvmsg_locked()
2699 copied = -EAGAIN; in tcp_recvmsg_locked()
2722 (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) { in tcp_recvmsg_locked()
2724 current->comm, in tcp_recvmsg_locked()
2726 peek_seq = tp->copied_seq + peek_offset; in tcp_recvmsg_locked()
2732 used = skb->len - offset; in tcp_recvmsg_locked()
2736 /* Do we have urgent data here? */ in tcp_recvmsg_locked()
2737 if (unlikely(tp->urg_data)) { in tcp_recvmsg_locked()
2738 u32 urg_offset = tp->urg_seq - *seq; in tcp_recvmsg_locked()
2745 used--; in tcp_recvmsg_locked()
2755 if (last_copied_dmabuf != -1 && in tcp_recvmsg_locked()
2765 copied = -EFAULT; in tcp_recvmsg_locked()
2774 copied = -EFAULT; in tcp_recvmsg_locked()
2783 copied = -EFAULT; in tcp_recvmsg_locked()
2795 len -= used; in tcp_recvmsg_locked()
2803 if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { in tcp_recvmsg_locked()
2804 WRITE_ONCE(tp->urg_data, 0); in tcp_recvmsg_locked()
2808 if (TCP_SKB_CB(skb)->has_rxtstamp) { in tcp_recvmsg_locked()
2813 if (used + offset < skb->len) in tcp_recvmsg_locked()
2816 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_recvmsg_locked()
2831 * on connected socket. I was just happy when found this 8) --ANK in tcp_recvmsg_locked()
2860 skb_queue_empty_lockless(&sk->sk_receive_queue) && in tcp_recvmsg()
2861 sk->sk_state == TCP_ESTABLISHED) in tcp_recvmsg()
2868 if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { in tcp_recvmsg()
2871 if (msg->msg_get_inq) { in tcp_recvmsg()
2872 msg->msg_inq = tcp_inq_hint(sk); in tcp_recvmsg()
2875 sizeof(msg->msg_inq), &msg->msg_inq); in tcp_recvmsg()
2884 int oldstate = sk->sk_state; in tcp_set_state()
2914 * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF in tcp_set_state()
2936 sk->sk_prot->unhash(sk); in tcp_set_state()
2937 if (inet_csk(sk)->icsk_bind_hash && in tcp_set_state()
2938 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) in tcp_set_state()
2979 int next = (int)new_state[sk->sk_state]; in tcp_close_state()
3002 if ((1 << sk->sk_state) & in tcp_shutdown()
3040 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && in tcp_out_of_memory()
3056 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); in tcp_check_oom()
3066 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); in __tcp_close()
3068 if (sk->sk_state == TCP_LISTEN) { in __tcp_close()
3078 * descriptor close, not protocol-sourced closes, because the in __tcp_close()
3081 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { in __tcp_close()
3082 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; in __tcp_close()
3084 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in __tcp_close()
3085 len--; in __tcp_close()
3090 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ in __tcp_close()
3091 if (sk->sk_state == TCP_CLOSE) in __tcp_close()
3098 * advertise a zero window, then kill -9 the FTP client, wheee... in __tcp_close()
3101 if (unlikely(tcp_sk(sk)->repair)) { in __tcp_close()
3102 sk->sk_prot->disconnect(sk, 0); in __tcp_close()
3107 tcp_send_active_reset(sk, sk->sk_allocation, in __tcp_close()
3109 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { in __tcp_close()
3111 sk->sk_prot->disconnect(sk, 0); in __tcp_close()
3118 /* RED-PEN. Formally speaking, we have broken TCP state in __tcp_close()
3121 * TCP_ESTABLISHED -> TCP_FIN_WAIT1 in __tcp_close()
3122 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult) in __tcp_close()
3123 * TCP_CLOSE_WAIT -> TCP_LAST_ACK in __tcp_close()
3129 * if Linux state is FIN-WAIT-1, but FIN is still not sent. in __tcp_close()
3132 * we enter time-wait state, when it is not required really in __tcp_close()
3137 * --ANK in __tcp_close()
3138 * XXX (TFO) - To start off we don't support SYN+ACK+FIN in __tcp_close()
3140 * probably need API support or TCP_CORK SYN-ACK until in __tcp_close()
3149 state = sk->sk_state; in __tcp_close()
3161 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) in __tcp_close()
3168 * our end. If they send after that then tough - BUT: long enough in __tcp_close()
3169 * that we won't make the old 4*rto = almost no time - whoops in __tcp_close()
3175 * linger2 option. --ANK in __tcp_close()
3178 if (sk->sk_state == TCP_FIN_WAIT2) { in __tcp_close()
3180 if (READ_ONCE(tp->linger2) < 0) { in __tcp_close()
3191 tmo - TCP_TIMEWAIT_LEN); in __tcp_close()
3198 if (sk->sk_state != TCP_CLOSE) { in __tcp_close()
3211 if (sk->sk_state == TCP_CLOSE) { in __tcp_close()
3214 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, in __tcp_close()
3216 /* We could get here with a non-NULL req if the socket is in __tcp_close()
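
/*
 * Editor's sketch, not part of tcp.c: the SOCK_LINGER branch of
 * __tcp_close() above. A zero linger time makes close() take the
 * disconnect path, sending a reset instead of a FIN and skipping
 * TIME-WAIT; any queued data is discarded.
 */
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger lg = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	close(fd);	/* RST goes out via the disconnect path above */
}
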
3236 if (!sk->sk_net_refcnt) in tcp_close()
3253 struct rb_node *p = rb_first(&sk->tcp_rtx_queue); in tcp_rtx_queue_purge()
3255 tcp_sk(sk)->highest_sack = NULL; in tcp_rtx_queue_purge()
3261 * list_del(&skb->tcp_tsorted_anchor) in tcp_rtx_queue_purge()
3273 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { in tcp_write_queue_purge()
3278 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); in tcp_write_queue_purge()
3280 tcp_sk(sk)->packets_out = 0; in tcp_write_queue_purge()
3281 inet_csk(sk)->icsk_backoff = 0; in tcp_write_queue_purge()
3289 int old_state = sk->sk_state; in tcp_disconnect()
3298 } else if (unlikely(tp->repair)) { in tcp_disconnect()
3299 WRITE_ONCE(sk->sk_err, ECONNABORTED); in tcp_disconnect()
3302 WRITE_ONCE(sk->sk_err, ECONNRESET); in tcp_disconnect()
3303 } else if (tp->snd_nxt != tp->write_seq && in tcp_disconnect()
3310 WRITE_ONCE(sk->sk_err, ECONNRESET); in tcp_disconnect()
3312 WRITE_ONCE(sk->sk_err, ECONNRESET); in tcp_disconnect()
3315 __skb_queue_purge(&sk->sk_receive_queue); in tcp_disconnect()
3316 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_disconnect()
3317 WRITE_ONCE(tp->urg_data, 0); in tcp_disconnect()
3318 sk_set_peek_off(sk, -1); in tcp_disconnect()
3321 skb_rbtree_purge(&tp->out_of_order_queue); in tcp_disconnect()
3323 inet->inet_dport = 0; in tcp_disconnect()
3327 WRITE_ONCE(sk->sk_shutdown, 0); in tcp_disconnect()
3329 tp->srtt_us = 0; in tcp_disconnect()
3330 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); in tcp_disconnect()
3331 tp->rcv_rtt_last_tsecr = 0; in tcp_disconnect()
3333 seq = tp->write_seq + tp->max_window + 2; in tcp_disconnect()
3336 WRITE_ONCE(tp->write_seq, seq); in tcp_disconnect()
3338 icsk->icsk_backoff = 0; in tcp_disconnect()
3339 icsk->icsk_probes_out = 0; in tcp_disconnect()
3340 icsk->icsk_probes_tstamp = 0; in tcp_disconnect()
3341 icsk->icsk_rto = TCP_TIMEOUT_INIT; in tcp_disconnect()
3342 icsk->icsk_rto_min = TCP_RTO_MIN; in tcp_disconnect()
3343 icsk->icsk_delack_max = TCP_DELACK_MAX; in tcp_disconnect()
3344 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; in tcp_disconnect()
3346 tp->snd_cwnd_cnt = 0; in tcp_disconnect()
3347 tp->is_cwnd_limited = 0; in tcp_disconnect()
3348 tp->max_packets_out = 0; in tcp_disconnect()
3349 tp->window_clamp = 0; in tcp_disconnect()
3350 tp->delivered = 0; in tcp_disconnect()
3351 tp->delivered_ce = 0; in tcp_disconnect()
3352 if (icsk->icsk_ca_initialized && icsk->icsk_ca_ops->release) in tcp_disconnect()
3353 icsk->icsk_ca_ops->release(sk); in tcp_disconnect()
3354 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); in tcp_disconnect()
3355 icsk->icsk_ca_initialized = 0; in tcp_disconnect()
3357 tp->is_sack_reneg = 0; in tcp_disconnect()
3359 tp->total_retrans = 0; in tcp_disconnect()
3364 icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; in tcp_disconnect()
3365 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); in tcp_disconnect()
3367 dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL))); in tcp_disconnect()
3369 tp->compressed_ack = 0; in tcp_disconnect()
3370 tp->segs_in = 0; in tcp_disconnect()
3371 tp->segs_out = 0; in tcp_disconnect()
3372 tp->bytes_sent = 0; in tcp_disconnect()
3373 tp->bytes_acked = 0; in tcp_disconnect()
3374 tp->bytes_received = 0; in tcp_disconnect()
3375 tp->bytes_retrans = 0; in tcp_disconnect()
3376 tp->data_segs_in = 0; in tcp_disconnect()
3377 tp->data_segs_out = 0; in tcp_disconnect()
3378 tp->duplicate_sack[0].start_seq = 0; in tcp_disconnect()
3379 tp->duplicate_sack[0].end_seq = 0; in tcp_disconnect()
3380 tp->dsack_dups = 0; in tcp_disconnect()
3381 tp->reord_seen = 0; in tcp_disconnect()
3382 tp->retrans_out = 0; in tcp_disconnect()
3383 tp->sacked_out = 0; in tcp_disconnect()
3384 tp->tlp_high_seq = 0; in tcp_disconnect()
3385 tp->last_oow_ack_time = 0; in tcp_disconnect()
3386 tp->plb_rehash = 0; in tcp_disconnect()
3388 tp->app_limited = ~0U; in tcp_disconnect()
3389 tp->rate_app_limited = 1; in tcp_disconnect()
3390 tp->rack.mstamp = 0; in tcp_disconnect()
3391 tp->rack.advanced = 0; in tcp_disconnect()
3392 tp->rack.reo_wnd_steps = 1; in tcp_disconnect()
3393 tp->rack.last_delivered = 0; in tcp_disconnect()
3394 tp->rack.reo_wnd_persist = 0; in tcp_disconnect()
3395 tp->rack.dsack_seen = 0; in tcp_disconnect()
3396 tp->syn_data_acked = 0; in tcp_disconnect()
3397 tp->rx_opt.saw_tstamp = 0; in tcp_disconnect()
3398 tp->rx_opt.dsack = 0; in tcp_disconnect()
3399 tp->rx_opt.num_sacks = 0; in tcp_disconnect()
3400 tp->rcv_ooopack = 0; in tcp_disconnect()
3406 tp->fastopen_client_fail = 0; in tcp_disconnect()
3408 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); in tcp_disconnect()
3410 if (sk->sk_frag.page) { in tcp_disconnect()
3411 put_page(sk->sk_frag.page); in tcp_disconnect()
3412 sk->sk_frag.page = NULL; in tcp_disconnect()
3413 sk->sk_frag.offset = 0; in tcp_disconnect()
3422 return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && in tcp_can_repair_sock()
3423 (sk->sk_state != TCP_LISTEN); in tcp_can_repair_sock()
3430 if (!tp->repair) in tcp_repair_set_window()
3431 return -EPERM; in tcp_repair_set_window()
3434 return -EINVAL; in tcp_repair_set_window()
3437 return -EFAULT; in tcp_repair_set_window()
3440 return -EINVAL; in tcp_repair_set_window()
3442 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) in tcp_repair_set_window()
3443 return -EINVAL; in tcp_repair_set_window()
3445 if (after(opt.rcv_wup, tp->rcv_nxt)) in tcp_repair_set_window()
3446 return -EINVAL; in tcp_repair_set_window()
3448 tp->snd_wl1 = opt.snd_wl1; in tcp_repair_set_window()
3449 tp->snd_wnd = opt.snd_wnd; in tcp_repair_set_window()
3450 tp->max_window = opt.max_window; in tcp_repair_set_window()
3452 tp->rcv_wnd = opt.rcv_wnd; in tcp_repair_set_window()
3453 tp->rcv_wup = opt.rcv_wup; in tcp_repair_set_window()
3467 return -EFAULT; in tcp_repair_options_est()
3470 len -= sizeof(opt); in tcp_repair_options_est()
3474 tp->rx_opt.mss_clamp = opt.opt_val; in tcp_repair_options_est()
3483 return -EFBIG; in tcp_repair_options_est()
3485 tp->rx_opt.snd_wscale = snd_wscale; in tcp_repair_options_est()
3486 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_repair_options_est()
3487 tp->rx_opt.wscale_ok = 1; in tcp_repair_options_est()
3492 return -EINVAL; in tcp_repair_options_est()
3494 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; in tcp_repair_options_est()
3498 return -EINVAL; in tcp_repair_options_est()
3500 tp->rx_opt.tstamp_ok = 1; in tcp_repair_options_est()
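
/*
 * Editor's sketch, not part of tcp.c: the checkpoint/restore interface
 * validated by tcp_repair_set_window() and tcp_repair_options_est() above.
 * Repair mode requires CAP_NET_ADMIN (tcp_can_repair_sock()); the sequence
 * numbers here are placeholders, not meaningful state.
 */
#include <linux/tcp.h>
#include <netinet/in.h>
#include <sys/socket.h>

static void restore_seqs(int fd, unsigned int snd_seq, unsigned int rcv_seq)
{
	int on = 1, off = 0, q;

	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));

	q = TCP_SEND_QUEUE;	/* seeds write_seq, as in do_tcp_setsockopt() */
	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
	setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &snd_seq, sizeof(snd_seq));

	q = TCP_RECV_QUEUE;	/* seeds rcv_nxt and copied_seq */
	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
	setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &rcv_seq, sizeof(rcv_seq));

	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
}
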
3523 /* When set indicates to always queue non-full frames. Later the user clears
3537 tp->nonagle |= TCP_NAGLE_CORK; in __tcp_sock_set_cork()
3539 tp->nonagle &= ~TCP_NAGLE_CORK; in __tcp_sock_set_cork()
3540 if (tp->nonagle & TCP_NAGLE_OFF) in __tcp_sock_set_cork()
3541 tp->nonagle |= TCP_NAGLE_PUSH; in __tcp_sock_set_cork()
3563 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; in __tcp_sock_set_nodelay()
3566 tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; in __tcp_sock_set_nodelay()
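
/*
 * Editor's sketch, not part of tcp.c: the Nagle knobs handled by
 * __tcp_sock_set_cork() and __tcp_sock_set_nodelay() above. Cork while
 * assembling a multi-part response, then uncork to push everything at once.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void send_two_parts(int fd, const void *hdr, size_t hlen,
			   const void *body, size_t blen)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	send(fd, hdr, hlen, 0);		/* queued, not pushed */
	send(fd, body, blen, 0);
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
	/* uncorking sets TCP_NAGLE_PUSH when Nagle is off, as above */
}
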
3586 if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && in __tcp_sock_set_quickack()
3588 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; in __tcp_sock_set_quickack()
3606 return -EINVAL; in tcp_sock_set_syncnt()
3608 WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val); in tcp_sock_set_syncnt()
3619 return -EINVAL; in tcp_sock_set_user_timeout()
3621 WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val); in tcp_sock_set_user_timeout()
3631 return -EINVAL; in tcp_sock_set_keepidle_locked()
3634 WRITE_ONCE(tp->keepalive_time, val * HZ); in tcp_sock_set_keepidle_locked()
3636 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { in tcp_sock_set_keepidle_locked()
3639 if (tp->keepalive_time > elapsed) in tcp_sock_set_keepidle_locked()
3640 elapsed = tp->keepalive_time - elapsed; in tcp_sock_set_keepidle_locked()
3663 return -EINVAL; in tcp_sock_set_keepintvl()
3665 WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ); in tcp_sock_set_keepintvl()
3673 return -EINVAL; in tcp_sock_set_keepcnt()
3676 WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val); in tcp_sock_set_keepcnt()
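
/*
 * Editor's sketch, not part of tcp.c: the keepalive setters above as seen
 * from userspace. The numbers are illustrative only: probe an idle peer
 * after 60s, then every 10s, giving up after 5 unanswered probes.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void arm_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}
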
3686 if (sk->sk_state != TCP_CLOSE) in tcp_set_window_clamp()
3687 return -EINVAL; in tcp_set_window_clamp()
3688 WRITE_ONCE(tp->window_clamp, 0); in tcp_set_window_clamp()
3690 u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp; in tcp_set_window_clamp()
3697 WRITE_ONCE(tp->window_clamp, new_window_clamp); in tcp_set_window_clamp()
3702 __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp); in tcp_set_window_clamp()
3705 new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); in tcp_set_window_clamp()
3706 tp->rcv_ssthresh = max(new_rcv_ssthresh, in tcp_set_window_clamp()
3707 tp->rcv_ssthresh); in tcp_set_window_clamp()
3731 return -EINVAL; in do_tcp_setsockopt()
3734 min_t(long, TCP_CA_NAME_MAX-1, optlen)); in do_tcp_setsockopt()
3736 return -EFAULT; in do_tcp_setsockopt()
3741 sockopt_ns_capable(sock_net(sk)->user_ns, in do_tcp_setsockopt()
3750 return -EINVAL; in do_tcp_setsockopt()
3753 min_t(long, TCP_ULP_NAME_MAX - 1, in do_tcp_setsockopt()
3756 return -EFAULT; in do_tcp_setsockopt()
3773 return -EINVAL; in do_tcp_setsockopt()
3776 return -EFAULT; in do_tcp_setsockopt()
3789 return -EINVAL; in do_tcp_setsockopt()
3792 return -EFAULT; in do_tcp_setsockopt()
3806 WRITE_ONCE(tp->linger2, -1); in do_tcp_setsockopt()
3808 WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX); in do_tcp_setsockopt()
3810 WRITE_ONCE(tp->linger2, val * HZ); in do_tcp_setsockopt()
3814 WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept, in do_tcp_setsockopt()
3829 err = -EINVAL; in do_tcp_setsockopt()
3832 tp->rx_opt.user_mss = val; in do_tcp_setsockopt()
3841 err = -EINVAL; in do_tcp_setsockopt()
3843 tp->thin_lto = val; in do_tcp_setsockopt()
3848 err = -EINVAL; in do_tcp_setsockopt()
3853 err = -EPERM; in do_tcp_setsockopt()
3855 tp->repair = 1; in do_tcp_setsockopt()
3856 sk->sk_reuse = SK_FORCE_REUSE; in do_tcp_setsockopt()
3857 tp->repair_queue = TCP_NO_QUEUE; in do_tcp_setsockopt()
3859 tp->repair = 0; in do_tcp_setsockopt()
3860 sk->sk_reuse = SK_NO_REUSE; in do_tcp_setsockopt()
3863 tp->repair = 0; in do_tcp_setsockopt()
3864 sk->sk_reuse = SK_NO_REUSE; in do_tcp_setsockopt()
3866 err = -EINVAL; in do_tcp_setsockopt()
3871 if (!tp->repair) in do_tcp_setsockopt()
3872 err = -EPERM; in do_tcp_setsockopt()
3874 tp->repair_queue = val; in do_tcp_setsockopt()
3876 err = -EINVAL; in do_tcp_setsockopt()
3880 if (sk->sk_state != TCP_CLOSE) { in do_tcp_setsockopt()
3881 err = -EPERM; in do_tcp_setsockopt()
3882 } else if (tp->repair_queue == TCP_SEND_QUEUE) { in do_tcp_setsockopt()
3884 err = -EPERM; in do_tcp_setsockopt()
3886 WRITE_ONCE(tp->write_seq, val); in do_tcp_setsockopt()
3887 } else if (tp->repair_queue == TCP_RECV_QUEUE) { in do_tcp_setsockopt()
3888 if (tp->rcv_nxt != tp->copied_seq) { in do_tcp_setsockopt()
3889 err = -EPERM; in do_tcp_setsockopt()
3891 WRITE_ONCE(tp->rcv_nxt, val); in do_tcp_setsockopt()
3892 WRITE_ONCE(tp->copied_seq, val); in do_tcp_setsockopt()
3895 err = -EINVAL; in do_tcp_setsockopt()
3900 if (!tp->repair) in do_tcp_setsockopt()
3901 err = -EINVAL; in do_tcp_setsockopt()
3902 else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) in do_tcp_setsockopt()
3905 err = -EPERM; in do_tcp_setsockopt()
3918 err = -EINVAL; in do_tcp_setsockopt()
3920 tp->save_syn = val; in do_tcp_setsockopt()
3933 err = -EPERM; in do_tcp_setsockopt()
3942 /* If this is the first TCP-AO setsockopt() on the socket, in do_tcp_setsockopt()
3946 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) in do_tcp_setsockopt()
3948 if (rcu_dereference_protected(tcp_sk(sk)->ao_info, in do_tcp_setsockopt()
3951 if (tp->repair) in do_tcp_setsockopt()
3953 err = -EISCONN; in do_tcp_setsockopt()
3956 err = tp->af_specific->ao_parse(sk, optname, optval, optlen); in do_tcp_setsockopt()
3963 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); in do_tcp_setsockopt()
3967 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | in do_tcp_setsockopt()
3973 err = -EINVAL; in do_tcp_setsockopt()
3978 err = -EINVAL; in do_tcp_setsockopt()
3979 } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & in do_tcp_setsockopt()
3981 if (sk->sk_state == TCP_CLOSE) in do_tcp_setsockopt()
3982 tp->fastopen_connect = val; in do_tcp_setsockopt()
3984 err = -EINVAL; in do_tcp_setsockopt()
3986 err = -EOPNOTSUPP; in do_tcp_setsockopt()
3991 err = -EINVAL; in do_tcp_setsockopt()
3992 else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) in do_tcp_setsockopt()
3993 err = -EINVAL; in do_tcp_setsockopt()
3995 tp->fastopen_no_cookie = val; in do_tcp_setsockopt()
3998 if (!tp->repair) { in do_tcp_setsockopt()
3999 err = -EPERM; in do_tcp_setsockopt()
4006 tp->tcp_usec_ts = val & 1; in do_tcp_setsockopt()
4007 WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts)); in do_tcp_setsockopt()
4013 WRITE_ONCE(tp->notsent_lowat, val); in do_tcp_setsockopt()
4014 sk->sk_write_space(sk); in do_tcp_setsockopt()
4018 err = -EINVAL; in do_tcp_setsockopt()
4020 tp->recvmsg_inq = val; in do_tcp_setsockopt()
4025 WRITE_ONCE(tp->tcp_tx_delay, val); in do_tcp_setsockopt()
4028 err = -ENOPROTOOPT; in do_tcp_setsockopt()
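
/*
 * Editor's sketch, not part of tcp.c: the TCP_CONGESTION branch of
 * do_tcp_setsockopt() above. The name is clamped to TCP_CA_NAME_MAX and
 * switching to a non-whitelisted algorithm may need CAP_NET_ADMIN,
 * mirroring the sockopt_ns_capable() check in the code.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_cc(int fd, const char *name)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name));
}
/* e.g. set_cc(fd, "cubic"); getsockopt() with the same option reads it back */
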
4043 return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, in tcp_setsockopt()
4056 stats[i] = tp->chrono_stat[i - 1]; in tcp_get_info_chrono_stats()
4057 if (i == tp->chrono_type) in tcp_get_info_chrono_stats()
4058 stats[i] += tcp_jiffies32 - tp->chrono_start; in tcp_get_info_chrono_stats()
4063 info->tcpi_busy_time = total; in tcp_get_info_chrono_stats()
4064 info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; in tcp_get_info_chrono_stats()
4065 info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; in tcp_get_info_chrono_stats()
4079 if (sk->sk_type != SOCK_STREAM) in tcp_get_info()
4082 info->tcpi_state = inet_sk_state_load(sk); in tcp_get_info()
4085 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_info()
4087 info->tcpi_pacing_rate = rate64; in tcp_get_info()
4089 rate = READ_ONCE(sk->sk_max_pacing_rate); in tcp_get_info()
4091 info->tcpi_max_pacing_rate = rate64; in tcp_get_info()
4093 info->tcpi_reordering = tp->reordering; in tcp_get_info()
4094 info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); in tcp_get_info()
4096 if (info->tcpi_state == TCP_LISTEN) { in tcp_get_info()
4098 * tcpi_unacked -> Number of children ready for accept() in tcp_get_info()
4099 * tcpi_sacked -> max backlog in tcp_get_info()
4101 info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); in tcp_get_info()
4102 info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); in tcp_get_info()
4108 info->tcpi_ca_state = icsk->icsk_ca_state; in tcp_get_info()
4109 info->tcpi_retransmits = icsk->icsk_retransmits; in tcp_get_info()
4110 info->tcpi_probes = icsk->icsk_probes_out; in tcp_get_info()
4111 info->tcpi_backoff = icsk->icsk_backoff; in tcp_get_info()
4113 if (tp->rx_opt.tstamp_ok) in tcp_get_info()
4114 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; in tcp_get_info()
4116 info->tcpi_options |= TCPI_OPT_SACK; in tcp_get_info()
4117 if (tp->rx_opt.wscale_ok) { in tcp_get_info()
4118 info->tcpi_options |= TCPI_OPT_WSCALE; in tcp_get_info()
4119 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; in tcp_get_info()
4120 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; in tcp_get_info()
4123 if (tp->ecn_flags & TCP_ECN_OK) in tcp_get_info()
4124 info->tcpi_options |= TCPI_OPT_ECN; in tcp_get_info()
4125 if (tp->ecn_flags & TCP_ECN_SEEN) in tcp_get_info()
4126 info->tcpi_options |= TCPI_OPT_ECN_SEEN; in tcp_get_info()
4127 if (tp->syn_data_acked) in tcp_get_info()
4128 info->tcpi_options |= TCPI_OPT_SYN_DATA; in tcp_get_info()
4129 if (tp->tcp_usec_ts) in tcp_get_info()
4130 info->tcpi_options |= TCPI_OPT_USEC_TS; in tcp_get_info()
4132 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); in tcp_get_info()
4133 info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato, in tcp_get_info()
4135 info->tcpi_snd_mss = tp->mss_cache; in tcp_get_info()
4136 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; in tcp_get_info()
4138 info->tcpi_unacked = tp->packets_out; in tcp_get_info()
4139 info->tcpi_sacked = tp->sacked_out; in tcp_get_info()
4141 info->tcpi_lost = tp->lost_out; in tcp_get_info()
4142 info->tcpi_retrans = tp->retrans_out; in tcp_get_info()
4145 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); in tcp_get_info()
4146 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); in tcp_get_info()
4147 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); in tcp_get_info()
4149 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; in tcp_get_info()
4150 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; in tcp_get_info()
4151 info->tcpi_rtt = tp->srtt_us >> 3; in tcp_get_info()
4152 info->tcpi_rttvar = tp->mdev_us >> 2; in tcp_get_info()
4153 info->tcpi_snd_ssthresh = tp->snd_ssthresh; in tcp_get_info()
4154 info->tcpi_advmss = tp->advmss; in tcp_get_info()
4156 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; in tcp_get_info()
4157 info->tcpi_rcv_space = tp->rcvq_space.space; in tcp_get_info()
4159 info->tcpi_total_retrans = tp->total_retrans; in tcp_get_info()
4161 info->tcpi_bytes_acked = tp->bytes_acked; in tcp_get_info()
4162 info->tcpi_bytes_received = tp->bytes_received; in tcp_get_info()
4163 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); in tcp_get_info()
4166 info->tcpi_segs_out = tp->segs_out; in tcp_get_info()
4169 info->tcpi_segs_in = READ_ONCE(tp->segs_in); in tcp_get_info()
4170 info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); in tcp_get_info()
4172 info->tcpi_min_rtt = tcp_min_rtt(tp); in tcp_get_info()
4173 info->tcpi_data_segs_out = tp->data_segs_out; in tcp_get_info()
4175 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0; in tcp_get_info()
4178 info->tcpi_delivery_rate = rate64; in tcp_get_info()
4179 info->tcpi_delivered = tp->delivered; in tcp_get_info()
4180 info->tcpi_delivered_ce = tp->delivered_ce; in tcp_get_info()
4181 info->tcpi_bytes_sent = tp->bytes_sent; in tcp_get_info()
4182 info->tcpi_bytes_retrans = tp->bytes_retrans; in tcp_get_info()
4183 info->tcpi_dsack_dups = tp->dsack_dups; in tcp_get_info()
4184 info->tcpi_reord_seen = tp->reord_seen; in tcp_get_info()
4185 info->tcpi_rcv_ooopack = tp->rcv_ooopack; in tcp_get_info()
4186 info->tcpi_snd_wnd = tp->snd_wnd; in tcp_get_info()
4187 info->tcpi_rcv_wnd = tp->rcv_wnd; in tcp_get_info()
4188 info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; in tcp_get_info()
4189 info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; in tcp_get_info()
4191 info->tcpi_total_rto = tp->total_rto; in tcp_get_info()
4192 info->tcpi_total_rto_recoveries = tp->total_rto_recoveries; in tcp_get_info()
4193 info->tcpi_total_rto_time = tp->total_rto_time; in tcp_get_info()
4194 if (tp->rto_stamp) in tcp_get_info()
4195 info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp; in tcp_get_info()
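
/*
 * Editor's sketch, not part of tcp.c: reading the snapshot filled in by
 * tcp_get_info() above. The kernel copies min(userspace len, sizeof) bytes,
 * so binaries built against older headers keep working.
 */
#include <linux/tcp.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void dump_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("rtt %uus cwnd %u retrans %u\n",
		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}
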
4237 if (skb->protocol == htons(ETH_P_IP)) in tcp_skb_ttl_or_hop_limit()
4238 return ip_hdr(skb)->ttl; in tcp_skb_ttl_or_hop_limit()
4239 else if (skb->protocol == htons(ETH_P_IPV6)) in tcp_skb_ttl_or_hop_limit()
4240 return ipv6_hdr(skb)->hop_limit; in tcp_skb_ttl_or_hop_limit()
4267 tp->data_segs_out, TCP_NLA_PAD); in tcp_get_timestamping_opt_stats()
4269 tp->total_retrans, TCP_NLA_PAD); in tcp_get_timestamping_opt_stats()
4271 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_timestamping_opt_stats()
4279 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); in tcp_get_timestamping_opt_stats()
4282 nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); in tcp_get_timestamping_opt_stats()
4283 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); in tcp_get_timestamping_opt_stats()
4284 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); in tcp_get_timestamping_opt_stats()
4285 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); in tcp_get_timestamping_opt_stats()
4286 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); in tcp_get_timestamping_opt_stats()
4288 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); in tcp_get_timestamping_opt_stats()
4289 nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); in tcp_get_timestamping_opt_stats()
4291 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, in tcp_get_timestamping_opt_stats()
4293 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, in tcp_get_timestamping_opt_stats()
4295 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); in tcp_get_timestamping_opt_stats()
4296 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); in tcp_get_timestamping_opt_stats()
4297 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); in tcp_get_timestamping_opt_stats()
4298 nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); in tcp_get_timestamping_opt_stats()
4300 max_t(int, 0, tp->write_seq - tp->snd_nxt)); in tcp_get_timestamping_opt_stats()
4301 nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, in tcp_get_timestamping_opt_stats()
4307 nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); in tcp_get_timestamping_opt_stats()
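Everything tcp_get_timestamping_opt_stats() emits reaches userspace as a block of nested netlink attributes attached to TX-timestamp completions on the socket error queue. A hedged sketch of the receive side, assuming SCM_TIMESTAMPING_OPT_STATS (54) and the TCP_NLA_* ids from the uapi headers:

#include <linux/net_tstamp.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SCM_TIMESTAMPING_OPT_STATS
#define SCM_TIMESTAMPING_OPT_STATS 54
#endif

static int enable_opt_stats(int fd)
{
	unsigned int flags = SOF_TIMESTAMPING_SOFTWARE |
			     SOF_TIMESTAMPING_TX_ACK |
			     SOF_TIMESTAMPING_OPT_ID |
			     SOF_TIMESTAMPING_OPT_STATS;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}

/* Walk the nlattr records of one SCM_TIMESTAMPING_OPT_STATS cmsg
 * pulled off the error queue with recvmsg(fd, &msg, MSG_ERRQUEUE). */
static void walk_opt_stats(struct cmsghdr *cm)
{
	size_t rem = cm->cmsg_len - CMSG_LEN(0);
	struct nlattr *nla = (struct nlattr *)CMSG_DATA(cm);

	while (rem >= sizeof(*nla) &&
	       nla->nla_len >= sizeof(*nla) && nla->nla_len <= rem) {
		if (nla->nla_type == TCP_NLA_DELIVERED)
			printf("delivered=%u\n",
			       *(__u32 *)((char *)nla + NLA_HDRLEN));
		rem -= NLA_ALIGN(nla->nla_len);
		nla = (struct nlattr *)((char *)nla + NLA_ALIGN(nla->nla_len));
	}
}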
4320 return -EFAULT; in do_tcp_getsockopt()
4323 return -EINVAL; in do_tcp_getsockopt()
4329 val = tp->mss_cache; in do_tcp_getsockopt()
4330 if (tp->rx_opt.user_mss && in do_tcp_getsockopt()
4331 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) in do_tcp_getsockopt()
4332 val = tp->rx_opt.user_mss; in do_tcp_getsockopt()
4333 if (tp->repair) in do_tcp_getsockopt()
4334 val = tp->rx_opt.mss_clamp; in do_tcp_getsockopt()
4337 val = !!(tp->nonagle & TCP_NAGLE_OFF); in do_tcp_getsockopt()
4340 val = !!(tp->nonagle & TCP_NAGLE_CORK); in do_tcp_getsockopt()
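TCP_NODELAY and TCP_CORK read back two bits of tp->nonagle; the setter side is the familiar cork/uncork pattern around scatter writes, sketched here with hypothetical hdr/body buffers:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void send_corked(int fd, const void *hdr, size_t hlen,
			const void *body, size_t blen)
{
	int on = 1, off = 0;

	/* Queue both pieces without emitting a partial segment ... */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	send(fd, hdr, hlen, 0);
	send(fd, body, blen, 0);
	/* ... then uncork; the kernel flushes whatever is pending. */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}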
4352 val = READ_ONCE(icsk->icsk_syn_retries) ? : in do_tcp_getsockopt()
4353 READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); in do_tcp_getsockopt()
4356 val = READ_ONCE(tp->linger2); in do_tcp_getsockopt()
4358 val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; in do_tcp_getsockopt()
4361 val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept); in do_tcp_getsockopt()
4366 val = READ_ONCE(tp->window_clamp); in do_tcp_getsockopt()
4372 return -EFAULT; in do_tcp_getsockopt()
4378 return -EFAULT; in do_tcp_getsockopt()
4380 return -EFAULT; in do_tcp_getsockopt()
4390 return -EFAULT; in do_tcp_getsockopt()
4392 ca_ops = icsk->icsk_ca_ops; in do_tcp_getsockopt()
4393 if (ca_ops && ca_ops->get_info) in do_tcp_getsockopt()
4394 sz = ca_ops->get_info(sk, ~0U, &attr, &info); in do_tcp_getsockopt()
4398 return -EFAULT; in do_tcp_getsockopt()
4400 return -EFAULT; in do_tcp_getsockopt()
4409 return -EFAULT; in do_tcp_getsockopt()
4412 return -EFAULT; in do_tcp_getsockopt()
4413 if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) in do_tcp_getsockopt()
4414 return -EFAULT; in do_tcp_getsockopt()
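The TCP_CONGESTION case copies the congestion-control name out as a bounded string, and the same option sets it. A sketch; TCP_CA_NAME_MAX mirrors the kernel's 16-byte cap and may need defining if the libc headers lack it:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_CA_NAME_MAX
#define TCP_CA_NAME_MAX 16
#endif

static void show_and_set_cc(int fd)
{
	char name[TCP_CA_NAME_MAX];
	socklen_t len = sizeof(name);

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("cc in use: %.*s\n", (int)len, name);

	/* Switching by name works only if the algorithm is built in or
	 * its module is loaded; unprivileged callers are further limited
	 * to net.ipv4.tcp_allowed_congestion_control. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       "bbr", strlen("bbr")) < 0)
		perror("TCP_CONGESTION bbr");
}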
4419 return -EFAULT; in do_tcp_getsockopt()
4421 if (!icsk->icsk_ulp_ops) { in do_tcp_getsockopt()
4424 return -EFAULT; in do_tcp_getsockopt()
4428 return -EFAULT; in do_tcp_getsockopt()
4429 if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) in do_tcp_getsockopt()
4430 return -EFAULT; in do_tcp_getsockopt()
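TCP_ULP likewise reads back the attached upper-layer protocol by name; attaching is also by name, the common case being kTLS before handing key material to the kernel:

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif

/* Attach the kTLS ULP to an established socket; needs the tls module.
 * Record keys are installed afterwards via setsockopt(SOL_TLS, ...). */
static int attach_ktls(int fd)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
}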
4438 return -EFAULT; in do_tcp_getsockopt()
4444 return -EFAULT; in do_tcp_getsockopt()
4446 return -EFAULT; in do_tcp_getsockopt()
4450 val = tp->thin_lto; in do_tcp_getsockopt()
4458 val = tp->repair; in do_tcp_getsockopt()
4462 if (tp->repair) in do_tcp_getsockopt()
4463 val = tp->repair_queue; in do_tcp_getsockopt()
4465 return -EINVAL; in do_tcp_getsockopt()
4472 return -EFAULT; in do_tcp_getsockopt()
4475 return -EINVAL; in do_tcp_getsockopt()
4477 if (!tp->repair) in do_tcp_getsockopt()
4478 return -EPERM; in do_tcp_getsockopt()
4480 opt.snd_wl1 = tp->snd_wl1; in do_tcp_getsockopt()
4481 opt.snd_wnd = tp->snd_wnd; in do_tcp_getsockopt()
4482 opt.max_window = tp->max_window; in do_tcp_getsockopt()
4483 opt.rcv_wnd = tp->rcv_wnd; in do_tcp_getsockopt()
4484 opt.rcv_wup = tp->rcv_wup; in do_tcp_getsockopt()
4487 return -EFAULT; in do_tcp_getsockopt()
4491 if (tp->repair_queue == TCP_SEND_QUEUE) in do_tcp_getsockopt()
4492 val = tp->write_seq; in do_tcp_getsockopt()
4493 else if (tp->repair_queue == TCP_RECV_QUEUE) in do_tcp_getsockopt()
4494 val = tp->rcv_nxt; in do_tcp_getsockopt()
4496 return -EINVAL; in do_tcp_getsockopt()
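The repair-mode getters above exist for checkpoint/restore (CRIU). A hedged sketch of the read side: TCP_REPAIR needs CAP_NET_ADMIN, and TCP_QUEUE_SEQ reports write_seq or rcv_nxt depending on which queue was selected with TCP_REPAIR_QUEUE:

#include <linux/tcp.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static int checkpoint_peek(int fd)
{
	struct tcp_repair_window w;
	socklen_t wlen = sizeof(w), slen = sizeof(int);
	int on = TCP_REPAIR_ON, q = TCP_SEND_QUEUE, seq;

	/* The TCP_REPAIR_* getters return -EPERM outside repair mode. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0)
		return -1;
	if (getsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, &w, &wlen) < 0)
		return -1;
	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
	getsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &seq, &slen);
	printf("snd_wnd=%u rcv_wnd=%u max_window=%u write_seq=%u\n",
	       w.snd_wnd, w.rcv_wnd, w.max_window, (unsigned int)seq);
	return 0;
}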
4500 val = READ_ONCE(icsk->icsk_user_timeout); in do_tcp_getsockopt()
4504 val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen); in do_tcp_getsockopt()
4508 val = tp->fastopen_connect; in do_tcp_getsockopt()
4512 val = tp->fastopen_no_cookie; in do_tcp_getsockopt()
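The three fastopen getters mirror the setters used for TFO. A client-side sketch using TCP_FASTOPEN_CONNECT: connect() completes immediately, and with a cached cookie the first send() carries data on the SYN, otherwise the kernel quietly falls back to a normal handshake:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30
#endif

static ssize_t tfo_connect_send(int fd, const struct sockaddr_in *dst,
				const void *req, size_t len)
{
	int one = 1;

	/* The server side is symmetrical: setsockopt(lfd, IPPROTO_TCP,
	 * TCP_FASTOPEN, &qlen, sizeof(qlen)) before listen(). */
	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
	if (connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) < 0)
		return -1;
	return send(fd, req, len, 0);
}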
4516 val = READ_ONCE(tp->tcp_tx_delay); in do_tcp_getsockopt()
4520 val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset); in do_tcp_getsockopt()
4521 if (tp->tcp_usec_ts) in do_tcp_getsockopt()
4527 val = READ_ONCE(tp->notsent_lowat); in do_tcp_getsockopt()
4530 val = tp->recvmsg_inq; in do_tcp_getsockopt()
4533 val = tp->save_syn; in do_tcp_getsockopt()
4537 return -EFAULT; in do_tcp_getsockopt()
4540 if (tp->saved_syn) { in do_tcp_getsockopt()
4541 if (len < tcp_saved_syn_len(tp->saved_syn)) { in do_tcp_getsockopt()
4542 len = tcp_saved_syn_len(tp->saved_syn); in do_tcp_getsockopt()
4545 return -EFAULT; in do_tcp_getsockopt()
4548 return -EINVAL; in do_tcp_getsockopt()
4550 len = tcp_saved_syn_len(tp->saved_syn); in do_tcp_getsockopt()
4553 return -EFAULT; in do_tcp_getsockopt()
4555 if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { in do_tcp_getsockopt()
4557 return -EFAULT; in do_tcp_getsockopt()
4565 return -EFAULT; in do_tcp_getsockopt()
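The length dance above is the contract for TCP_SAVED_SYN: when the buffer is too small the kernel writes the required size into optlen and fails with EINVAL; on success the saved headers are handed out once and then freed. A sketch:

#include <errno.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef TCP_SAVE_SYN
#define TCP_SAVE_SYN  27
#define TCP_SAVED_SYN 28
#endif

static void dump_saved_syn(int accepted_fd)
{
	unsigned char buf[512];	/* headers of the peer's SYN */
	socklen_t len = sizeof(buf);

	/* TCP_SAVE_SYN must have been enabled on the listener before
	 * this connection was accepted. */
	if (getsockopt(accepted_fd, IPPROTO_TCP, TCP_SAVED_SYN,
		       buf, &len) < 0) {
		if (errno == EINVAL)	/* kernel wrote the needed size */
			fprintf(stderr, "need %u bytes\n", len);
		return;
	}
	printf("saved SYN: %u header bytes\n", len);
}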
4576 return -EFAULT; in do_tcp_getsockopt()
4579 return -EINVAL; in do_tcp_getsockopt()
4582 len - sizeof(zc)); in do_tcp_getsockopt()
4584 return err == 0 ? -EINVAL : err; in do_tcp_getsockopt()
4587 return -EFAULT; in do_tcp_getsockopt()
4590 return -EFAULT; in do_tcp_getsockopt()
4592 return -EINVAL; in do_tcp_getsockopt()
4594 return -EINVAL; in do_tcp_getsockopt()
4630 err = -EFAULT; in do_tcp_getsockopt()
4636 return -EPERM; in do_tcp_getsockopt()
4655 return -ENOPROTOOPT; in do_tcp_getsockopt()
4659 return -EFAULT; in do_tcp_getsockopt()
4661 return -EFAULT; in do_tcp_getsockopt()
4684 return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, in tcp_getsockopt()
4692 int tcp_md5_sigpool_id = -1;
4704 * id would stay the same. Re-write the id only for the case in tcp_md5_alloc_sigpool()
4728 u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ in tcp_md5_hash_key()
4731 sg_init_one(&sg, key->key, keylen); in tcp_md5_hash_key()
4732 ahash_request_set_crypt(hp->req, &sg, NULL, keylen); in tcp_md5_hash_key()
4735 * key->key under us in tcp_md5_hash_key()
4737 return data_race(crypto_ahash_update(hp->req)); in tcp_md5_hash_key()
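On the configuration side, MD5 keys are installed per peer with setsockopt(TCP_MD5SIG). A hedged sketch against the uapi struct tcp_md5sig (IPv4 peer; TCP_MD5SIG_MAXKEYLEN is 80):

#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int add_md5_key(int fd, const struct sockaddr_in *peer,
		       const void *key, unsigned int keylen)
{
	struct tcp_md5sig md5;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;
	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);
	/* Both ends must install the same key before the handshake;
	 * segments that fail the MD5 check are dropped, not reset. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}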
4747 /* This gets called for each TCP segment that has the TCP-MD5 option. in tcp_inbound_md5_hash()
4768 * IPv4-mapped case. in tcp_inbound_md5_hash()
4773 genhash = tp->af_specific->calc_md5_hash(newhash, key, in tcp_inbound_md5_hash()
4816 keyid = aoh->keyid; in tcp_inbound_hash()
4817 rnext = aoh->rnext_keyid; in tcp_inbound_hash()
4836 /* Drop if there's a TCP-MD5 or TCP-AO key with any rcvid/sndid in tcp_inbound_hash()
4837 * for the remote peer. On TCP-AO established connection in tcp_inbound_hash()
4869 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); in tcp_done()
4871 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) in tcp_done()
4879 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); in tcp_done()
4882 sk->sk_state_change(sk); in tcp_done()
4896 inet_csk_reqsk_queue_drop(req->rsk_listener, req); in tcp_abort()
4903 refcount_inc(&tw->tw_refcnt); in tcp_abort()
4916 if (sk->sk_state == TCP_CLOSE) { in tcp_abort()
4919 return -ENOENT; in tcp_abort()
4922 if (sk->sk_state == TCP_LISTEN) { in tcp_abort()
4931 if (tcp_need_reset(sk->sk_state)) in tcp_abort()
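tcp_abort() is the backend for SOCK_DESTROY on the inet_diag netlink interface (gated by CONFIG_INET_DIAG_DESTROY and CAP_NET_ADMIN); from userspace the usual entry point is iproute2, e.g. `ss -K dst 192.0.2.1`, rather than any direct socket call.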
4974 /* TX read-mostly hotpath cache lines */ in tcp_struct_check()
4984 /* TXRX read-mostly hotpath cache lines */ in tcp_struct_check()
4995 /* RX read-mostly hotpath cache lines */ in tcp_struct_check()
5010 /* TX read-write hotpath cache lines */ in tcp_struct_check()
5028 /* TXRX read-write hotpath cache lines */ in tcp_struct_check()
5050 /* RX read-write hotpath cache lines */ in tcp_struct_check()
5146 /* Set per-socket limits to no more than 1/128 of the pressure threshold */ in tcp_init()
5147 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); in tcp_init()
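The shift arithmetic works in bytes: nr_free_buffer_pages() << PAGE_SHIFT is the free buffer memory in bytes, and shifting by 7 less divides that by 2^7 = 128. Worked example with 4 KiB pages (PAGE_SHIFT = 12): 1 GiB of free buffer memory is 262,144 pages, so limit = 262144 << 5 = 8,388,608 bytes, i.e. 8 MiB.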