// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - device implementation.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

#include <net/addrconf.h>
#include <net/netlink.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/sock.h>

struct mctp_dump_cb {
	unsigned long ifindex;
	size_t a_idx;
};

/* unlocked: caller must hold rcu_read_lock.
 * Returned mctp_dev has its refcount incremented, or NULL if unset.
 */
struct mctp_dev *__mctp_dev_get(const struct net_device *dev)
{
	struct mctp_dev *mdev = rcu_dereference(dev->mctp_ptr);

	/* RCU guarantees that any mdev is still live.
	 * Zero refcount implies a pending free, return NULL.
	 */
	if (mdev)
		if (!refcount_inc_not_zero(&mdev->refs))
			return NULL;
	return mdev;
}

/* Returned mctp_dev does not have refcount incremented. The returned pointer
 * remains live while rtnl_lock is held, as that prevents mctp_unregister()
 * from running.
 */
struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->mctp_ptr);
}

static int mctp_addrinfo_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
		+ nla_total_size(1) // IFA_LOCAL
		+ nla_total_size(1) // IFA_ADDRESS
		;
}

/* flag should be NLM_F_MULTI for dump calls */
static int mctp_fill_addrinfo(struct sk_buff *skb,
			      struct mctp_dev *mdev, mctp_eid_t eid,
			      int msg_type, u32 portid, u32 seq, int flag)
{
	struct ifaddrmsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq,
			msg_type, sizeof(*hdr), flag);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifa_family = AF_MCTP;
	hdr->ifa_prefixlen = 0;
	hdr->ifa_flags = 0;
	hdr->ifa_scope = 0;
	hdr->ifa_index = mdev->dev->ifindex;

	if (nla_put_u8(skb, IFA_LOCAL, eid))
		goto cancel;

	if (nla_put_u8(skb, IFA_ADDRESS, eid))
		goto cancel;

	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

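/* Dump all local EIDs of one device, resuming from the address index saved
 * in the dump callback context so an interrupted dump continues where it
 * left off.
 */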
static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct mctp_dump_cb *mcb = (void *)cb->ctx;
	u32 portid, seq;
	int rc = 0;

	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;
	for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) {
		rc = mctp_fill_addrinfo(skb, mdev, mdev->addrs[mcb->a_idx],
					RTM_NEWADDR, portid, seq, NLM_F_MULTI);
		if (rc < 0)
			break;
	}

	return rc;
}

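/* RTM_GETADDR dump handler: walk all netdevs in the namespace (optionally
 * filtered by ifa_index) and emit one RTM_NEWADDR message per local EID.
 */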
static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct mctp_dump_cb *mcb = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct ifaddrmsg *hdr;
	struct mctp_dev *mdev;
	int ifindex, rc;

	hdr = nlmsg_data(cb->nlh);
	// filter by ifindex if requested
	ifindex = hdr->ifa_index;

	rcu_read_lock();
	for_each_netdev_dump(net, dev, mcb->ifindex) {
		if (ifindex && ifindex != dev->ifindex)
			continue;
		mdev = __mctp_dev_get(dev);
		if (!mdev)
			continue;
		rc = mctp_dump_dev_addrinfo(mdev, skb, cb);
		mctp_dev_put(mdev);
		if (rc < 0)
			break;
		mcb->a_idx = 0;
	}
	rcu_read_unlock();

	return skb->len;
}

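/* Notify RTNLGRP_MCTP_IFADDR listeners of an address addition or removal
 * (msg_type is RTM_NEWADDR or RTM_DELADDR). On failure, record the error
 * against the group via rtnl_set_sk_err().
 */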
static void mctp_addr_notify(struct mctp_dev *mdev, mctp_eid_t eid, int msg_type,
			     struct sk_buff *req_skb, struct nlmsghdr *req_nlh)
{
	u32 portid = NETLINK_CB(req_skb).portid;
	struct net *net = dev_net(mdev->dev);
	struct sk_buff *skb;
	int rc = -ENOBUFS;

	skb = nlmsg_new(mctp_addrinfo_size(), GFP_KERNEL);
	if (!skb)
		goto out;

	rc = mctp_fill_addrinfo(skb, mdev, eid, msg_type,
				portid, req_nlh->nlmsg_seq, 0);
	if (rc < 0) {
		WARN_ON_ONCE(rc == -EMSGSIZE);
		goto out;
	}

	rtnl_notify(skb, net, portid, RTNLGRP_MCTP_IFADDR, req_nlh, GFP_KERNEL);
	return;
out:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_MCTP_IFADDR, rc);
}

static const struct nla_policy ifa_mctp_policy[IFA_MAX + 1] = {
	[IFA_ADDRESS] = { .type = NLA_U8 },
	[IFA_LOCAL] = { .type = NLA_U8 },
};

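/* RTM_NEWADDR handler: append a new unicast EID to the device's address
 * array. The array is rebuilt outside the lock and swapped in under
 * addrs_lock, so lock holders always see a consistent addrs/num_addrs pair.
 */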
static int mctp_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX + 1];
	struct net_device *dev;
	struct mctp_addr *addr;
	struct mctp_dev *mdev;
	struct ifaddrmsg *ifm;
	unsigned long flags;
	u8 *tmp_addrs;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
			 extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);

	if (tb[IFA_LOCAL])
		addr = nla_data(tb[IFA_LOCAL]);
	else if (tb[IFA_ADDRESS])
		addr = nla_data(tb[IFA_ADDRESS]);
	else
		return -EINVAL;

	/* find device */
	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (!dev)
		return -ENODEV;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODEV;

	if (!mctp_address_unicast(addr->s_addr))
		return -EINVAL;

	/* Prevent duplicates. Under RTNL so don't need to lock for reading */
	if (memchr(mdev->addrs, addr->s_addr, mdev->num_addrs))
		return -EEXIST;

	tmp_addrs = kmalloc(mdev->num_addrs + 1, GFP_KERNEL);
	if (!tmp_addrs)
		return -ENOMEM;
	memcpy(tmp_addrs, mdev->addrs, mdev->num_addrs);
	tmp_addrs[mdev->num_addrs] = addr->s_addr;

	/* Lock to write */
	spin_lock_irqsave(&mdev->addrs_lock, flags);
	mdev->num_addrs++;
	swap(mdev->addrs, tmp_addrs);
	spin_unlock_irqrestore(&mdev->addrs_lock, flags);

	kfree(tmp_addrs);

	mctp_addr_notify(mdev, addr->s_addr, RTM_NEWADDR, skb, nlh);
	mctp_route_add_local(mdev, addr->s_addr);

	return 0;
}

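/* RTM_DELADDR handler: remove a local EID and its local route. The entry
 * is removed in place under addrs_lock by shifting the remaining addresses
 * down.
 */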
static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX + 1];
	struct net_device *dev;
	struct mctp_addr *addr;
	struct mctp_dev *mdev;
	struct ifaddrmsg *ifm;
	unsigned long flags;
	u8 *pos;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
			 extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);

	if (tb[IFA_LOCAL])
		addr = nla_data(tb[IFA_LOCAL]);
	else if (tb[IFA_ADDRESS])
		addr = nla_data(tb[IFA_ADDRESS]);
	else
		return -EINVAL;

	/* find device */
	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (!dev)
		return -ENODEV;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODEV;

	pos = memchr(mdev->addrs, addr->s_addr, mdev->num_addrs);
	if (!pos)
		return -ENOENT;

	rc = mctp_route_remove_local(mdev, addr->s_addr);
	// we can ignore -ENOENT in the case a route was already removed
	if (rc < 0 && rc != -ENOENT)
		return rc;

	spin_lock_irqsave(&mdev->addrs_lock, flags);
	memmove(pos, pos + 1, mdev->num_addrs - 1 - (pos - mdev->addrs));
	mdev->num_addrs--;
	spin_unlock_irqrestore(&mdev->addrs_lock, flags);

	mctp_addr_notify(mdev, addr->s_addr, RTM_DELADDR, skb, nlh);

	return 0;
}

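/* Take a reference on an mctp_dev; the caller must already hold one. */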
void mctp_dev_hold(struct mctp_dev *mdev)
{
	refcount_inc(&mdev->refs);
}

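/* Drop a reference; the final put releases the address array and the
 * net_device reference, then frees the mctp_dev after an RCU grace period.
 */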
void mctp_dev_put(struct mctp_dev *mdev)
{
	if (mdev && refcount_dec_and_test(&mdev->refs)) {
		kfree(mdev->addrs);
		dev_put(mdev->dev);
		kfree_rcu(mdev, rcu);
	}
}

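/* Detach a socket key from its device, giving the driver a chance to
 * release any per-flow state first. Called with the key lock held.
 */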
void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key)
	__must_hold(&key->lock)
{
	if (!dev)
		return;
	if (dev->ops && dev->ops->release_flow)
		dev->ops->release_flow(dev, key);
	key->dev = NULL;
	mctp_dev_put(dev);
}

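/* Associate a socket key with a device, taking a device reference.
 * Called with the key lock held.
 */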
void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key)
	__must_hold(&key->lock)
{
	mctp_dev_hold(dev);
	key->dev = dev;
}

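/* Allocate MCTP state for a net_device and attach it via dev->mctp_ptr.
 * The new mctp_dev starts with a single reference and holds a reference
 * on the underlying net_device. Requires RTNL.
 */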
static struct mctp_dev *mctp_add_dev(struct net_device *dev)
{
	struct mctp_dev *mdev;

	ASSERT_RTNL();

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mdev->addrs_lock);

	mdev->net = mctp_default_net(dev_net(dev));

	/* associate to net_device */
	refcount_set(&mdev->refs, 1);
	rcu_assign_pointer(dev->mctp_ptr, mdev);

	dev_hold(dev);
	mdev->dev = dev;

	return mdev;
}

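/* Fill the AF_MCTP portion of an RTM_NEWLINK message: the device's MCTP
 * net and its physical binding type.
 */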
static int mctp_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev, u32 ext_filter_mask)
{
	struct mctp_dev *mdev;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODATA;
	if (nla_put_u32(skb, IFLA_MCTP_NET, mdev->net))
		return -EMSGSIZE;
	if (nla_put_u8(skb, IFLA_MCTP_PHYS_BINDING, mdev->binding))
		return -EMSGSIZE;
	return 0;
}

static size_t mctp_get_link_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct mctp_dev *mdev;
	unsigned int ret;

	/* caller holds RCU */
	mdev = __mctp_dev_get(dev);
	if (!mdev)
		return 0;
	ret = nla_total_size(4); /* IFLA_MCTP_NET */
	ret += nla_total_size(1); /* IFLA_MCTP_PHYS_BINDING */
	mctp_dev_put(mdev);
	return ret;
}

static const struct nla_policy ifla_af_mctp_policy[IFLA_MCTP_MAX + 1] = {
	[IFLA_MCTP_NET] = { .type = NLA_U32 },
};

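/* Apply IFLA_AF_SPEC attributes for AF_MCTP; currently only the MCTP net
 * can be changed. Runs under RTNL.
 */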
static int mctp_set_link_af(struct net_device *dev, const struct nlattr *attr,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_MCTP_MAX + 1];
	struct mctp_dev *mdev;
	int rc;

	rc = nla_parse_nested(tb, IFLA_MCTP_MAX, attr, ifla_af_mctp_policy,
			      NULL);
	if (rc)
		return rc;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return 0;

	if (tb[IFLA_MCTP_NET])
		WRITE_ONCE(mdev->net, nla_get_u32(tb[IFLA_MCTP_NET]));

	return 0;
}

/* Matches netdev types that should have MCTP handling */
static bool mctp_known(struct net_device *dev)
{
	/* only register specific types (inc. NONE for TUN devices) */
	return dev->type == ARPHRD_MCTP ||
	       dev->type == ARPHRD_LOOPBACK ||
	       dev->type == ARPHRD_NONE;
}

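/* Tear down MCTP state when a net_device unregisters: clear mctp_ptr,
 * remove routes and neighbours for the device, then drop our reference.
 */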
static void mctp_unregister(struct net_device *dev)
{
	struct mctp_dev *mdev;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return;

	RCU_INIT_POINTER(mdev->dev->mctp_ptr, NULL);

	mctp_route_remove_dev(mdev);
	mctp_neigh_remove_dev(mdev);

	mctp_dev_put(mdev);
}

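/* Attach MCTP state to a newly-registered net_device of a supported type,
 * unless it has already been set up (eg. via mctp_register_netdevice()).
 */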
static int mctp_register(struct net_device *dev)
{
	struct mctp_dev *mdev;

	/* Already registered? */
	if (rtnl_dereference(dev->mctp_ptr))
		return 0;

	/* only register specific types */
	if (!mctp_known(dev))
		return 0;

	mdev = mctp_add_dev(dev);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	return 0;
}

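/* netdevice notifier: add/remove MCTP state as interfaces come and go */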
static int mctp_dev_notify(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int rc;

	switch (event) {
	case NETDEV_REGISTER:
		rc = mctp_register(dev);
		if (rc)
			return notifier_from_errno(rc);
		break;
	case NETDEV_UNREGISTER:
		mctp_unregister(dev);
		break;
	}

	return NOTIFY_OK;
}

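/* Set up MCTP state (with driver ops and physical binding type) before
 * registering the net_device itself, so the device does not become visible
 * without its MCTP data. Caller holds RTNL.
 */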
static int mctp_register_netdevice(struct net_device *dev,
				   const struct mctp_netdev_ops *ops,
				   enum mctp_phys_binding binding)
{
	struct mctp_dev *mdev;

	mdev = mctp_add_dev(dev);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	mdev->ops = ops;
	mdev->binding = binding;

	return register_netdevice(dev);
}

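/* Register a net_device as an MCTP interface on behalf of a bus binding
 * driver; takes RTNL around mctp_register_netdevice().
 *
 * As a rough sketch (names and the binding constant below are illustrative
 * assumptions, not taken from this file), a binding driver might use it as:
 *
 *	static const struct mctp_netdev_ops my_mctp_ops = {
 *		.release_flow = my_release_flow,
 *	};
 *
 *	rc = mctp_register_netdev(ndev, &my_mctp_ops,
 *				  MCTP_PHYS_BINDING_UNSPEC);
 */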
int mctp_register_netdev(struct net_device *dev,
			 const struct mctp_netdev_ops *ops,
			 enum mctp_phys_binding binding)
{
	int rc;

	rtnl_lock();
	rc = mctp_register_netdevice(dev, ops, binding);
	rtnl_unlock();

	return rc;
}
EXPORT_SYMBOL_GPL(mctp_register_netdev);

void mctp_unregister_netdev(struct net_device *dev)
{
	unregister_netdev(dev);
}
EXPORT_SYMBOL_GPL(mctp_unregister_netdev);

static struct rtnl_af_ops mctp_af_ops = {
	.family = AF_MCTP,
	.fill_link_af = mctp_fill_link_af,
	.get_link_af_size = mctp_get_link_af_size,
	.set_link_af = mctp_set_link_af,
};

static struct notifier_block mctp_dev_nb = {
	.notifier_call = mctp_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY,
};

static const struct rtnl_msg_handler mctp_device_rtnl_msg_handlers[] = {
	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_NEWADDR,
	 .doit = mctp_rtm_newaddr},
	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_DELADDR,
	 .doit = mctp_rtm_deladdr},
	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_GETADDR,
	 .dumpit = mctp_dump_addrinfo},
};

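/* Register the netdevice notifier, AF_MCTP link ops and rtnetlink address
 * handlers; unwind in reverse order on failure.
 */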
int __init mctp_device_init(void)
{
	int err;

	register_netdevice_notifier(&mctp_dev_nb);

	err = rtnl_af_register(&mctp_af_ops);
	if (err)
		goto err_notifier;

	err = rtnl_register_many(mctp_device_rtnl_msg_handlers);
	if (err)
		goto err_af;

	return 0;
err_af:
	rtnl_af_unregister(&mctp_af_ops);
err_notifier:
	unregister_netdevice_notifier(&mctp_dev_nb);
	return err;
}

void __exit mctp_device_exit(void)
{
	rtnl_unregister_many(mctp_device_rtnl_msg_handlers);
	rtnl_af_unregister(&mctp_af_ops);
	unregister_netdevice_notifier(&mctp_dev_nb);
}