Lines Matching +full:flow +full:- +full:level

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 * interface as the means of communication with the user level.
32 #include <net/flow.h>
73 return ip_hdr(skb)->ihl * 4; in ip_hdrlen()
89 *ipcm = (struct ipcm_cookie) { .tos = -1 }; in ipcm_init()
97 ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark); in ipcm_init_sk()
98 ipcm->sockc.priority = READ_ONCE(inet->sk.sk_priority); in ipcm_init_sk()
99 ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags); in ipcm_init_sk()
100 ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if); in ipcm_init_sk()
101 ipcm->addr = inet->inet_saddr; in ipcm_init_sk()
102 ipcm->protocol = inet->inet_num; in ipcm_init_sk()
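The two initializers cooperate: ipcm_init() plants the tos = -1 sentinel that get_rttos() later tests, and ipcm_init_sk() additionally seeds the cookie from socket state. A minimal caller sketch, assuming the usual sendmsg pattern:

	struct ipcm_cookie ipc;

	ipcm_init_sk(&ipc, inet_sk(sk));
	/* per-message cmsg parsing may now override individual ipc fields */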
105 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
106 #define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
112 if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags)) in inet_sdif()
113 return IPCB(skb)->iif; in inet_sdif()
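inet_sdif() exists for VRF: when the skb entered through an L3 master device, socket lookup must also match on the slave ifindex. A sketch of the usual demux pattern (variable names illustrative):

	int dif  = skb->dev->ifindex;	/* receiving device */
	int sdif = inet_sdif(skb);	/* slave behind an l3mdev, else 0 */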
123 It is a user-level problem what it will do with them.
124 I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
187 struct sk_buff *skb = iter->frag; in ip_fraglist_next()
189 iter->frag = skb->next; in ip_fraglist_next()
244 return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base); in ip_finish_skb()
252 if (sock_flag(&inet->sk, SOCK_LOCALROUTE) || in ip_sendmsg_scope()
253 msg->msg_flags & MSG_DONTROUTE || in ip_sendmsg_scope()
254 (ipc->opt && ipc->opt->opt.is_strictroute)) in ip_sendmsg_scope()
262 u8 dsfield = ipc->tos != -1 ? ipc->tos : READ_ONCE(inet->tos); in get_rttos()
278 /* -1 if not needed */
288 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; in ip_reply_arg_flowi_flags()
298 #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
299 #define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
300 #define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
301 #define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
302 #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
303 #define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
304 #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
305 #define __NET_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.net_statistics, field)
306 #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
307 #define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
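The double-underscore variants are the cheaper non-atomic forms, safe where the caller already runs with BH disabled. A usage sketch, assuming a receive-path caller with net and skb in scope:

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);	/* bumps packets and octets */
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);		/* bumps a single counter */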
358 u32 range = READ_ONCE(net->ipv4.ip_local_ports.range); in inet_get_local_port_range()
368 if (!net->ipv4.sysctl_local_reserved_ports) in inet_is_local_reserved_port()
370 return test_bit(port, net->ipv4.sysctl_local_reserved_ports); in inet_is_local_reserved_port()
380 return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock); in inet_port_requires_bind_service()
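Both bounds of the ephemeral-port range travel in a single u32 so a reader never sees a torn low/high pair; a sketch of the unpacking, assuming the low bound sits in the low 16 bits:

	u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);
	int low  = range & 0xffff;	/* first ephemeral port */
	int high = range >> 16;		/* last ephemeral port */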
407 (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
411 return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0; in ip_is_fragment()
418 * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
422 u32 check = (__force u32)iph->check; in ip_decrease_ttl()
423 check += (__force u32)htons(0x0100); in ip_decrease_ttl()
424 iph->check = (__force __sum16)(check + (check>=0xFFFF)); in ip_decrease_ttl()
425 return --iph->ttl; in ip_decrease_ttl()
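Decrementing TTL lowers the [TTL,protocol] header word by 0x0100, so the one's-complement checksum field must rise by 0x0100 with an end-around carry; htons() keeps the constant right on either endianness because the one's-complement sum is byte-order independent. A worked trace of the boundary case the comment names:

	u32 check = 0xFEFF;		/* stored checksum, big-endian view */
	check += 0x0100;		/* 0xFFFF: still needs the end-around fold */
	check += (check >= 0xFFFF);	/* 0x10000; truncation to 16 bits gives 0x0000 */
	/* the 2.2 code skipped this fold and wrongly returned 0xFFFF */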
430 return inet_dsfield_to_dscp(ip4h->tos); in ip4h_dscp()
437 return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU); in ip_mtu_locked()
443 u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); in ip_dont_fragment()
452 u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); in ip_sk_accept_pmtu()
460 return READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE; in ip_sk_use_pmtu()
465 u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); in ip_sk_ignore_df()
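These helpers key off the socket's IP_PMTUDISC_* mode (values from include/uapi/linux/in.h): DONT(0) never sets DF, WANT(1) follows per-route hints, DO(2) always sets DF, PROBE(3) sets DF but ignores the learned PMTU, and INTERFACE(4)/OMIT(5) disregard PMTU information entirely. The ordering is what lets ip_sk_use_pmtu() above get away with a plain comparison:

	bool use_pmtu = READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE;	/* modes 0..2 */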
479 net = dev_net_rcu(dst->dev); in ip_dst_mtu_maybe_forward()
480 if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) || in ip_dst_mtu_maybe_forward()
483 mtu = rt->rt_pmtu; in ip_dst_mtu_maybe_forward()
484 if (mtu && time_before(jiffies, rt->dst.expires)) in ip_dst_mtu_maybe_forward()
493 mtu = READ_ONCE(dst->dev->mtu); in ip_dst_mtu_maybe_forward()
496 if (rt->rt_uses_gateway && mtu > 576) in ip_dst_mtu_maybe_forward()
503 res = mtu - lwtunnel_headroom(dst->lwtstate, mtu); in ip_dst_mtu_maybe_forward()
516 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; in ip_skb_dst_mtu()
521 mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); in ip_skb_dst_mtu()
522 return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); in ip_skb_dst_mtu()
530 refcount_dec_and_test(&fib_metrics->refcnt)) in ip_fib_metrics_put()
538 dst_init_metrics(dst, fib_metrics->metrics, true); in ip_dst_init_metrics()
541 dst->_metrics |= DST_METRICS_REFCOUNTED; in ip_dst_init_metrics()
542 refcount_inc(&fib_metrics->refcnt); in ip_dst_init_metrics()
551 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) in ip_dst_metrics_put()
565 if (sk && inet_sk(sk)->inet_daddr) { in ip_select_ident_segs()
573 val = atomic_read(&inet_sk(sk)->inet_id); in ip_select_ident_segs()
574 atomic_set(&inet_sk(sk)->inet_id, val + segs); in ip_select_ident_segs()
576 val = atomic_add_return(segs, &inet_sk(sk)->inet_id); in ip_select_ident_segs()
578 iph->id = htons(val); in ip_select_ident_segs()
581 if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { in ip_select_ident_segs()
582 iph->id = 0; in ip_select_ident_segs()
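Read together, the fragments above implement a three-way identifier policy; a condensed sketch, with the variables standing in for the tests shown above:

	if (sk && inet_sk(sk)->inet_daddr)	/* connected: per-socket counter */
		iph->id = htons(val);		/* val advanced by segs, one ID per segment */
	else if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df)
		iph->id = 0;			/* atomic datagram, ID unused (RFC 6864) */
	else
		__ip_select_ident(net, iph, segs);	/* hash-based fallback */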
597 return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, in inet_compute_pseudo()
598 skb->len, proto, 0); in inet_compute_pseudo()
602 * Equivalent to : flow->v4addrs.src = iph->saddr;
603 * flow->v4addrs.dst = iph->daddr;
605 static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow, in iph_to_flow_copy_v4addrs() argument
608 BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) != in iph_to_flow_copy_v4addrs()
609 offsetof(typeof(flow->addrs), v4addrs.src) + in iph_to_flow_copy_v4addrs()
610 sizeof(flow->addrs.v4addrs.src)); in iph_to_flow_copy_v4addrs()
611 memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs)); in iph_to_flow_copy_v4addrs()
612 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; in iph_to_flow_copy_v4addrs()
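The BUILD_BUG_ON pins down the layout the memcpy relies on: v4addrs.src immediately followed by v4addrs.dst, mirroring the adjacent saddr/daddr in the IP header, so both addresses move in one 8-byte copy. A minimal caller sketch:

	struct flow_keys flow = {};
	const struct iphdr *iph = ip_hdr(skb);

	iph_to_flow_copy_v4addrs(&flow, iph);
	/* flow.addrs.v4addrs.{src,dst} now mirror iph->saddr/iph->daddr */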
633 * Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
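For comparison with the 20-byte IPoIB hardware address this comment describes, the classic RFC 1112 Ethernet mapping keeps only the low 23 bits of the group address under the 01:00:5e prefix; the header's own ip_eth_mc_map() implements it, and the hypothetical helper below re-sketches the idea:

	static inline void ipv4_mc_to_eth(__be32 group, unsigned char mac[6])
	{
		u32 addr = ntohl(group);

		mac[0] = 0x01;
		mac[1] = 0x00;
		mac[2] = 0x5e;
		mac[3] = (addr >> 16) & 0x7f;	/* 24th bit is dropped */
		mac[4] = (addr >> 8) & 0xff;
		mac[5] = addr & 0xff;
	}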
682 inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0; in inet_reset_saddr()
684 if (sk->sk_family == PF_INET6) { in inet_reset_saddr()
687 memset(&np->saddr, 0, sizeof(np->saddr)); in inet_reset_saddr()
688 memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); in inet_reset_saddr()
772 return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt); in ip_options_echo()
796 int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
798 int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
800 int do_ip_getsockopt(struct sock *sk, int level, int optname,
802 int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
815 ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0); in ip_cmsg_recv()