Lines Matching full:subflow

269 	u64		local_key;		/* protected by the first subflow socket lock
300 bool recovery; /* closing subflow write queue reinjected */
309 u8 pending_state; /* A subflow asked to set this sk_state,
333 * ONCE annotation, the subflow outside the socket
494 /* MPTCP subflow context */
533 is_mptfo : 1, /* subflow is doing TFO */
546 u8 hmac[MPTCPOPT_HMAC_LEN]; /* MPJ subflow only */
590 mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow) in mptcp_subflow_tcp_sock() argument
592 return subflow->tcp_sock; in mptcp_subflow_tcp_sock()
596 mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow) in mptcp_subflow_ctx_reset() argument
598 memset(&subflow->reset, 0, sizeof(subflow->reset)); in mptcp_subflow_ctx_reset()
599 subflow->request_mptcp = 1; in mptcp_subflow_ctx_reset()
600 WRITE_ONCE(subflow->local_id, -1); in mptcp_subflow_ctx_reset()
633 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in mptcp_send_active_reset_reason() local
636 reason = sk_rst_convert_mptcp_reason(subflow->reset_reason); in mptcp_send_active_reset_reason()
641 mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow) in mptcp_subflow_get_map_offset() argument
643 return tcp_sk(mptcp_subflow_tcp_sock(subflow))->copied_seq - in mptcp_subflow_get_map_offset()
644 subflow->ssn_offset - in mptcp_subflow_get_map_offset()
645 subflow->map_subflow_seq; in mptcp_subflow_get_map_offset()
649 mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow) in mptcp_subflow_get_mapped_dsn() argument
651 return subflow->map_seq + mptcp_subflow_get_map_offset(subflow); in mptcp_subflow_get_mapped_dsn()
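The two helpers above encode the subflow-to-data-sequence mapping arithmetic: the offset into the current mapping is copied_seq - ssn_offset - map_subflow_seq, and adding that offset to map_seq yields the data sequence number (DSN) the subflow has been consumed up to. Below is a minimal, self-contained user-space sketch of that arithmetic; the struct and the values in main() are illustrative stand-ins for the kernel fields, not kernel code.

/*
 * User-space model of the mapping arithmetic shown in the fragments above.
 * Field names mirror the fragments; the example values are made up.
 */
#include <stdint.h>
#include <stdio.h>

struct subflow_model {
	uint64_t map_seq;		/* DSN where the current mapping starts */
	uint32_t map_subflow_seq;	/* subflow seq where the mapping starts */
	uint32_t ssn_offset;		/* subflow initial sequence offset */
	uint32_t copied_seq;		/* TCP sequence consumed so far */
};

static uint64_t model_map_offset(const struct subflow_model *s)
{
	/* how far into the current mapping the receiver has consumed */
	return s->copied_seq - s->ssn_offset - s->map_subflow_seq;
}

static uint64_t model_mapped_dsn(const struct subflow_model *s)
{
	/* data sequence number corresponding to the consumed subflow data */
	return s->map_seq + model_map_offset(s);
}

int main(void)
{
	struct subflow_model s = {
		.map_seq = 1000000,
		.map_subflow_seq = 1,
		.ssn_offset = 5000,
		.copied_seq = 5101,
	};

	printf("map offset: %llu\n", (unsigned long long)model_map_offset(&s));
	printf("mapped DSN: %llu\n", (unsigned long long)model_mapped_dsn(&s));
	return 0;
}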
656 static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action) in mptcp_subflow_delegate() argument
662 /* the caller held the subflow bh socket lock */ in mptcp_subflow_delegate()
669 old = set_mask_bits(&subflow->delegated_status, 0, set_bits); in mptcp_subflow_delegate()
671 if (WARN_ON_ONCE(!list_empty(&subflow->delegated_node))) in mptcp_subflow_delegate()
676 list_add_tail(&subflow->delegated_node, &delegated->head); in mptcp_subflow_delegate()
677 sock_hold(mptcp_subflow_tcp_sock(subflow)); in mptcp_subflow_delegate()
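The mptcp_subflow_delegate() fragments above show a delegation pattern: the requested action bits are OR-ed atomically into delegated_status, and only the first pending action queues the context on a list (list_add_tail) and pins the underlying socket (sock_hold) until the action is processed. The sketch below models that queue-once-and-hold idea in user space; the names (ctx, pending_list, ctx_hold) and the "old == 0" first-action test are simplifications, not the kernel API.

/*
 * User-space model of the queue-once-and-hold pattern: atomically OR the
 * requested action bits into a status word; only the 0 -> non-zero
 * transition links the context onto a work list and takes a reference so
 * it stays alive until the pending actions are processed.
 */
#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	atomic_uint delegated_status;	/* pending action bitmask */
	int refcnt;			/* simplified reference count */
	struct ctx *next;		/* simple singly linked work list */
};

static struct ctx *pending_list;

static void ctx_hold(struct ctx *c)
{
	c->refcnt++;
}

static void delegate(struct ctx *c, unsigned int action_bit)
{
	unsigned int old = atomic_fetch_or(&c->delegated_status, action_bit);

	/* only the first pending action queues the context */
	if (old == 0) {
		c->next = pending_list;
		pending_list = c;
		ctx_hold(c);
	}
}

int main(void)
{
	struct ctx c = { .refcnt = 1 };

	delegate(&c, 1u << 0);	/* queued, extra reference taken */
	delegate(&c, 1u << 1);	/* already queued, only sets a new bit */
	printf("status=%u refcnt=%d queued=%s\n",
	       atomic_load(&c.delegated_status), c.refcnt,
	       pending_list == &c ? "yes" : "no");
	return 0;
}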
711 struct mptcp_subflow_context *subflow,
720 struct mptcp_subflow_context *subflow);
751 void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
787 static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow) in __mptcp_subflow_active() argument
790 if (subflow->request_join && !READ_ONCE(subflow->fully_established)) in __mptcp_subflow_active()
793 return __tcp_can_send(mptcp_subflow_tcp_sock(subflow)); in __mptcp_subflow_active()
796 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);
798 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
917 struct mptcp_subflow_context *subflow; in __mptcp_sync_sndbuf() local
924 mptcp_for_each_subflow(mptcp_sk(sk), subflow) { in __mptcp_sync_sndbuf()
925 ssk_sndbuf = READ_ONCE(mptcp_subflow_tcp_sock(subflow)->sk_sndbuf); in __mptcp_sync_sndbuf()
927 subflow->cached_sndbuf = ssk_sndbuf; in __mptcp_sync_sndbuf()
936 /* the caller held both the msk socket and the subflow socket locks,
941 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_propagate_sndbuf() local
943 if (READ_ONCE(ssk->sk_sndbuf) != subflow->cached_sndbuf) in __mptcp_propagate_sndbuf()
947 /* the caller held only the subflow socket lock, either in process or
954 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_propagate_sndbuf() local
956 if (likely(READ_ONCE(ssk->sk_sndbuf) == subflow->cached_sndbuf)) in mptcp_propagate_sndbuf()
960 mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SNDBUF); in mptcp_propagate_sndbuf()
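The sndbuf helpers above share one idea: each subflow keeps cached_sndbuf, the send-buffer size last folded into the msk, so propagation can be skipped when the live sk_sndbuf still matches the cache; and when only the subflow lock is held, mptcp_propagate_sndbuf() does not touch the msk directly but delegates an MPTCP_DELEGATE_SNDBUF action instead. A small user-space sketch of that change-detection-plus-deferral pattern follows; all names in it are illustrative, not the kernel API.

/*
 * User-space model: skip the sync when nothing changed, and defer it when
 * the parent cannot be updated from the current context.
 */
#include <stdbool.h>
#include <stdio.h>

struct subflow {
	int sk_sndbuf;		/* live TCP send buffer size */
	int cached_sndbuf;	/* value last folded into the parent */
	bool sync_pending;	/* stand-in for a delegated SNDBUF action */
};

/* runs with the parent "locked": fold the live value into the cache */
static bool sync_sndbuf(struct subflow *sf)
{
	if (sf->sk_sndbuf == sf->cached_sndbuf)
		return false;
	sf->cached_sndbuf = sf->sk_sndbuf;
	sf->sync_pending = false;
	return true;		/* parent-level sndbuf would be recomputed here */
}

/* runs with only the subflow "locked": defer instead of touching the parent */
static void propagate_sndbuf(struct subflow *sf)
{
	if (sf->sk_sndbuf == sf->cached_sndbuf)
		return;			/* fast path: nothing changed */
	sf->sync_pending = true;	/* request a later sync under the parent lock */
}

int main(void)
{
	struct subflow sf = { .sk_sndbuf = 16384, .cached_sndbuf = 16384 };

	propagate_sndbuf(&sf);		/* no-op, value unchanged */
	sf.sk_sndbuf = 65536;		/* autotuning grew the buffer */
	propagate_sndbuf(&sf);		/* marks a pending sync */
	printf("pending=%d resynced=%d\n", sf.sync_pending, sync_sndbuf(&sf));
	return 0;
}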
1010 const struct mptcp_subflow_context *subflow);
1061 void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
1063 void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
1144 static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow) in subflow_get_local_id() argument
1146 int local_id = READ_ONCE(subflow->local_id); in subflow_get_local_id()
1190 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in mptcp_check_fallback() local
1191 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in mptcp_check_fallback()
1218 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_do_fallback() local
1219 struct sock *sk = subflow->conn; in mptcp_do_fallback()
1240 struct mptcp_subflow_context *subflow) in mptcp_subflow_early_fallback() argument
1243 subflow->request_mptcp = 0; in mptcp_subflow_early_fallback()
1258 static inline bool is_active_ssk(struct mptcp_subflow_context *subflow) in is_active_ssk() argument
1260 return (subflow->request_mptcp || subflow->request_join); in is_active_ssk()
1265 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_simultaneous_connect() local
1269 is_active_ssk(subflow) && in subflow_simultaneous_connect()
1270 !subflow->conn_finished; in subflow_simultaneous_connect()
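The last two fragments reduce to simple predicates: is_active_ssk() marks a subflow that this host opened itself (it requested MPTCP or an MP_JOIN), and subflow_simultaneous_connect() only considers such an actively opened subflow whose handshake has not finished yet; the rest of the return condition is elided from the listing. A tiny sketch of that boolean logic follows, with the elided part reduced to a single "established" flag; all names are illustrative.

/*
 * User-space model of the two predicates; the elided state check is
 * reduced to one flag for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct subflow_flags {
	bool request_mptcp;	/* locally initiated MPC subflow */
	bool request_join;	/* locally initiated MP_JOIN subflow */
	bool conn_finished;	/* handshake already completed */
	bool established;	/* stand-in for the elided state check */
};

static bool is_active_ssk_model(const struct subflow_flags *f)
{
	return f->request_mptcp || f->request_join;
}

static bool simultaneous_connect_model(const struct subflow_flags *f)
{
	return f->established && is_active_ssk_model(f) && !f->conn_finished;
}

int main(void)
{
	struct subflow_flags f = {
		.request_mptcp = true,
		.established = true,
	};

	printf("active=%d simultaneous=%d\n",
	       is_active_ssk_model(&f), simultaneous_connect_model(&f));
	return 0;
}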