// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <[email protected]>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/xfrm.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

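/* Sink for packets on a dead or blackholed route: free the skb and
 * report success so callers do not retry.
 */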
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into a const section. Otherwise it might end up in
	 * the .bss section. We really want to avoid false sharing on
	 * this variable, and to catch any writes to it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);

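/* Set up a freshly allocated dst_entry: take a tracked reference on the
 * device, point the metrics at the shared read-only defaults, install
 * the discard input/output handlers, and start with one reference held
 * by the caller.
 */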
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_obsolete,
	      unsigned short flags)
{
	dst->dev = dev;
	netdev_hold(dev, &dst->dev_tracker, GFP_ATOMIC);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	rcuref_init(&dst->__rcuref, 1);
	INIT_LIST_HEAD(&dst->rt_uncached);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

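/* Allocate and initialize a dst_entry from the per-family kmem cache.
 * If the entry is accounted and the family is over its gc threshold,
 * give the family's garbage collector a chance to run first.
 */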
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc &&
	    !(flags & DST_NOCOUNT) &&
	    dst_entries_get_fast(ops) > ops->gc_thresh)
		ops->gc(ops);

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);

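/* Tear down a dst whose last reference has been dropped: run the family
 * destructor, drop the device and lwtunnel references, and free the
 * entry. For xfrm bundles, release the child dst as well.
 */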
static void dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child = NULL;

	/* Make sure the reads below observe all writes to the dst that
	 * preceded the final reference drop on another CPU.
	 */
	smp_rmb();

#ifdef CONFIG_XFRM
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

		child = xdst->child;
	}
#endif
	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	netdev_put(dst->dev, &dst->dev_tracker);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst)
		dst_release_immediate(dst);
}

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst_destroy(dst);
}

/* Operations to mark a dst as DEAD and clean up the net device it
 * references:
 * 1. put the dst under the blackhole interface and discard all tx/rx
 *    packets on this route.
 * 2. release the net_device.
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev);
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->dev = blackhole_netdev;
	netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
			   GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);

static void dst_count_dec(struct dst_entry *dst)
{
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);
}

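/* Drop a reference; on the final put, defer the actual destruction
 * until after an RCU grace period, since lockless readers may still
 * hold a pointer to this dst.
 */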
void dst_release(struct dst_entry *dst)
{
	if (dst && rcuref_put(&dst->__rcuref)) {
#ifdef CONFIG_DST_CACHE
		if (dst->flags & DST_METADATA) {
			struct metadata_dst *md_dst = (struct metadata_dst *)dst;

			if (md_dst->type == METADATA_IP_TUNNEL)
				dst_cache_reset_now(&md_dst->u.tun_info.dst_cache);
		}
#endif
		dst_count_dec(dst);
		call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

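/* Like dst_release(), but frees the dst right away instead of waiting
 * for an RCU grace period. Only safe when the caller knows no other
 * CPU can still be reading this dst.
 */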
void dst_release_immediate(struct dst_entry *dst)
{
	if (dst && rcuref_put(&dst->__rcuref)) {
		dst_count_dec(dst);
		dst_destroy(dst);
	}
}
EXPORT_SYMBOL(dst_release_immediate);

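/* Copy-on-write of the metrics array: replace the (possibly shared,
 * possibly read-only) metrics at @old with a private writable copy,
 * using cmpxchg() to resolve races with concurrent writers. Returns the
 * writable metrics, or NULL if a racing writer installed a read-only
 * set.
 */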
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false. */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

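/* The operations below implement a "blackhole" dst: every op is a no-op
 * or returns NULL, so a dst using them accepts packets but never
 * resolves neighbours, updates PMTU, or follows redirects. They back
 * the metadata dsts below, and most are exported for reuse by the
 * per-family blackhole route implementations.
 */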
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}

struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}

void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);

void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);

unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);

static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};

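/* Metadata dsts carry per-packet tunnel or xfrm metadata rather than a
 * real route. They use the blackhole ops, hold no device reference, and
 * are not counted against any family's entry limit.
 */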
static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	/* Zero everything that follows the embedded dst_entry, including
	 * the variable-length tunnel options area.
	 */
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

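/* Free a metadata dst, first releasing whatever its type-specific union
 * member holds: the dst cache for IP tunnel metadata, or the original
 * dst for xfrm metadata.
 */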
void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	if (md_dst->type == METADATA_XFRM)
		dst_release(md_dst->u.xfrm_info.dst_orig);
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);

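/* Per-CPU variants for callers that need one metadata dst per CPU; each
 * CPU's copy is initialized, and later torn down, individually.
 */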
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

#ifdef CONFIG_DST_CACHE
		if (one_md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
#endif
		if (one_md_dst->type == METADATA_XFRM)
			dst_release(one_md_dst->u.xfrm_info.dst_orig);
	}
	free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);