Lines Matching full:bond

266 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
293 * @bond: bond device that got this skb for tx.
297 netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, in bond_dev_queue_xmit() argument
306 if (unlikely(netpoll_tx_running(bond->dev))) in bond_dev_queue_xmit()
307 return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); in bond_dev_queue_xmit()
312 static bool bond_sk_check(struct bonding *bond) in bond_sk_check() argument
314 switch (BOND_MODE(bond)) { in bond_sk_check()
317 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) in bond_sk_check()
325 bool bond_xdp_check(struct bonding *bond, int mode) in bond_xdp_check() argument
336 if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC) in bond_xdp_check()
371 struct bonding *bond = netdev_priv(bond_dev); in bond_vlan_rx_add_vid() local
376 bond_for_each_slave(bond, slave, iter) { in bond_vlan_rx_add_vid()
386 bond_for_each_slave(bond, rollback_slave, iter) { in bond_vlan_rx_add_vid()
405 struct bonding *bond = netdev_priv(bond_dev); in bond_vlan_rx_kill_vid() local
409 bond_for_each_slave(bond, slave, iter) in bond_vlan_rx_kill_vid()
412 if (bond_is_lb(bond)) in bond_vlan_rx_kill_vid()
413 bond_alb_clear_vlan(bond, vid); in bond_vlan_rx_kill_vid()
432 struct bonding *bond; in bond_ipsec_dev() local
438 bond = netdev_priv(bond_dev); in bond_ipsec_dev()
439 if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) in bond_ipsec_dev()
442 slave = rcu_dereference(bond->curr_active_slave); in bond_ipsec_dev()
468 struct bonding *bond; in bond_ipsec_add_sa() local
476 bond = netdev_priv(bond_dev); in bond_ipsec_add_sa()
477 slave = rcu_dereference(bond->curr_active_slave); in bond_ipsec_add_sa()
505 mutex_lock(&bond->ipsec_lock); in bond_ipsec_add_sa()
506 list_add(&ipsec->list, &bond->ipsec_list); in bond_ipsec_add_sa()
507 mutex_unlock(&bond->ipsec_lock); in bond_ipsec_add_sa()
516 static void bond_ipsec_add_sa_all(struct bonding *bond) in bond_ipsec_add_sa_all() argument
518 struct net_device *bond_dev = bond->dev; in bond_ipsec_add_sa_all()
523 slave = rtnl_dereference(bond->curr_active_slave); in bond_ipsec_add_sa_all()
528 mutex_lock(&bond->ipsec_lock); in bond_ipsec_add_sa_all()
532 if (!list_empty(&bond->ipsec_list)) in bond_ipsec_add_sa_all()
539 list_for_each_entry(ipsec, &bond->ipsec_list, list) { in bond_ipsec_add_sa_all()
551 mutex_unlock(&bond->ipsec_lock); in bond_ipsec_add_sa_all()
564 struct bonding *bond; in bond_ipsec_del_sa() local
571 bond = netdev_priv(bond_dev); in bond_ipsec_del_sa()
572 slave = rcu_dereference(bond->curr_active_slave); in bond_ipsec_del_sa()
595 mutex_lock(&bond->ipsec_lock); in bond_ipsec_del_sa()
596 list_for_each_entry(ipsec, &bond->ipsec_list, list) { in bond_ipsec_del_sa()
603 mutex_unlock(&bond->ipsec_lock); in bond_ipsec_del_sa()
606 static void bond_ipsec_del_sa_all(struct bonding *bond) in bond_ipsec_del_sa_all() argument
608 struct net_device *bond_dev = bond->dev; in bond_ipsec_del_sa_all()
613 slave = rtnl_dereference(bond->curr_active_slave); in bond_ipsec_del_sa_all()
618 mutex_lock(&bond->ipsec_lock); in bond_ipsec_del_sa_all()
619 list_for_each_entry(ipsec, &bond->ipsec_list, list) { in bond_ipsec_del_sa_all()
635 mutex_unlock(&bond->ipsec_lock); in bond_ipsec_del_sa_all()
643 struct bonding *bond; in bond_ipsec_free_sa() local
650 bond = netdev_priv(bond_dev); in bond_ipsec_free_sa()
651 slave = rcu_dereference(bond->curr_active_slave); in bond_ipsec_free_sa()
763 int bond_set_carrier(struct bonding *bond) in bond_set_carrier() argument
768 if (!bond_has_slaves(bond)) in bond_set_carrier()
771 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_set_carrier()
772 return bond_3ad_set_carrier(bond); in bond_set_carrier()
774 bond_for_each_slave(bond, slave, iter) { in bond_set_carrier()
776 if (!netif_carrier_ok(bond->dev)) { in bond_set_carrier()
777 netif_carrier_on(bond->dev); in bond_set_carrier()
785 if (netif_carrier_ok(bond->dev)) { in bond_set_carrier()
786 netif_carrier_off(bond->dev); in bond_set_carrier()
857 static int bond_check_dev_link(struct bonding *bond, in bond_check_dev_link() argument
868 if (bond->params.use_carrier) in bond_check_dev_link()
912 static int bond_set_promiscuity(struct bonding *bond, int inc) in bond_set_promiscuity() argument
917 if (bond_uses_primary(bond)) { in bond_set_promiscuity()
918 struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); in bond_set_promiscuity()
925 bond_for_each_slave(bond, slave, iter) { in bond_set_promiscuity()
935 static int bond_set_allmulti(struct bonding *bond, int inc) in bond_set_allmulti() argument
940 if (bond_uses_primary(bond)) { in bond_set_allmulti()
941 struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); in bond_set_allmulti()
948 bond_for_each_slave(bond, slave, iter) { in bond_set_allmulti()
963 struct bonding *bond = container_of(work, struct bonding, in bond_resend_igmp_join_requests_delayed() local
967 queue_delayed_work(bond->wq, &bond->mcast_work, 1); in bond_resend_igmp_join_requests_delayed()
970 call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev); in bond_resend_igmp_join_requests_delayed()
972 if (bond->igmp_retrans > 1) { in bond_resend_igmp_join_requests_delayed()
973 bond->igmp_retrans--; in bond_resend_igmp_join_requests_delayed()
974 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); in bond_resend_igmp_join_requests_delayed()
979 /* Flush bond's hardware addresses from slave */
983 struct bonding *bond = netdev_priv(bond_dev); in bond_hw_addr_flush() local
988 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_hw_addr_flush()
999 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, in bond_hw_addr_swap() argument
1003 if (bond->dev->flags & IFF_PROMISC) in bond_hw_addr_swap()
1006 if (bond->dev->flags & IFF_ALLMULTI) in bond_hw_addr_swap()
1009 if (bond->dev->flags & IFF_UP) in bond_hw_addr_swap()
1010 bond_hw_addr_flush(bond->dev, old_active->dev); in bond_hw_addr_swap()
1012 bond_slave_ns_maddrs_add(bond, old_active); in bond_hw_addr_swap()
1017 if (bond->dev->flags & IFF_PROMISC) in bond_hw_addr_swap()
1020 if (bond->dev->flags & IFF_ALLMULTI) in bond_hw_addr_swap()
1023 if (bond->dev->flags & IFF_UP) { in bond_hw_addr_swap()
1024 netif_addr_lock_bh(bond->dev); in bond_hw_addr_swap()
1025 dev_uc_sync(new_active->dev, bond->dev); in bond_hw_addr_swap()
1026 dev_mc_sync(new_active->dev, bond->dev); in bond_hw_addr_swap()
1027 netif_addr_unlock_bh(bond->dev); in bond_hw_addr_swap()
1030 bond_slave_ns_maddrs_del(bond, new_active); in bond_hw_addr_swap()
1035 * bond_set_dev_addr - clone slave's address to bond
1036 * @bond_dev: bond net device
1058 static struct slave *bond_get_old_active(struct bonding *bond, in bond_get_old_active() argument
1064 bond_for_each_slave(bond, slave, iter) { in bond_get_old_active()
1068 if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) in bond_get_old_active()
1081 static void bond_do_fail_over_mac(struct bonding *bond, in bond_do_fail_over_mac() argument
1089 switch (bond->params.fail_over_mac) { in bond_do_fail_over_mac()
1092 rv = bond_set_dev_addr(bond->dev, new_active->dev); in bond_do_fail_over_mac()
1094 slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n", in bond_do_fail_over_mac()
1101 * if just new_active, set new_active to bond's MAC in bond_do_fail_over_mac()
1107 old_active = bond_get_old_active(bond, new_active); in bond_do_fail_over_mac()
1117 bond_hw_addr_copy(ss.__data, bond->dev->dev_addr, in bond_do_fail_over_mac()
1118 bond->dev->addr_len); in bond_do_fail_over_mac()
1119 ss.ss_family = bond->dev->type; in bond_do_fail_over_mac()
1125 slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n", in bond_do_fail_over_mac()
1140 slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n", in bond_do_fail_over_mac()
1145 netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n", in bond_do_fail_over_mac()
1146 bond->params.fail_over_mac); in bond_do_fail_over_mac()
1154 * @bond: our bonding struct
1163 static struct slave *bond_choose_primary_or_current(struct bonding *bond) in bond_choose_primary_or_current() argument
1165 struct slave *prim = rtnl_dereference(bond->primary_slave); in bond_choose_primary_or_current()
1166 struct slave *curr = rtnl_dereference(bond->curr_active_slave); in bond_choose_primary_or_current()
1171 bond_for_each_slave(bond, slave, iter) { in bond_choose_primary_or_current()
1189 if (bond->force_primary) { in bond_choose_primary_or_current()
1190 bond->force_primary = false; in bond_choose_primary_or_current()
1199 switch (bond->params.primary_reselect) { in bond_choose_primary_or_current()
1211 netdev_err(bond->dev, "impossible primary_reselect %d\n", in bond_choose_primary_or_current()
1212 bond->params.primary_reselect); in bond_choose_primary_or_current()
1219 * @bond: our bonding struct
1221 static struct slave *bond_find_best_slave(struct bonding *bond) in bond_find_best_slave() argument
1225 int mintime = bond->params.updelay; in bond_find_best_slave()
1227 slave = bond_choose_primary_or_current(bond); in bond_find_best_slave()
1231 bond_for_each_slave(bond, slave, iter) { in bond_find_best_slave()
1245 static bool bond_should_notify_peers(struct bonding *bond) in bond_should_notify_peers() argument
1247 struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave); in bond_should_notify_peers()
1249 if (!slave || !bond->send_peer_notif || in bond_should_notify_peers()
1250 bond->send_peer_notif % in bond_should_notify_peers()
1251 max(1, bond->params.peer_notif_delay) != 0 || in bond_should_notify_peers()
1252 !netif_carrier_ok(bond->dev) || in bond_should_notify_peers()
1256 netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", in bond_should_notify_peers()
1264 * @bond: our bonding struct
1267 * Set the new slave to the bond's settings and unset them on the old
1277 void bond_change_active_slave(struct bonding *bond, struct slave *new_active) in bond_change_active_slave() argument
1283 old_active = rtnl_dereference(bond->curr_active_slave); in bond_change_active_slave()
1289 bond_ipsec_del_sa_all(bond); in bond_change_active_slave()
1296 if (bond_uses_primary(bond)) { in bond_change_active_slave()
1297 slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n", in bond_change_active_slave()
1298 (bond->params.updelay - new_active->delay) * bond->params.miimon); in bond_change_active_slave()
1305 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_change_active_slave()
1308 if (bond_is_lb(bond)) in bond_change_active_slave()
1309 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); in bond_change_active_slave()
1311 if (bond_uses_primary(bond)) in bond_change_active_slave()
1312 slave_info(bond->dev, new_active->dev, "making interface the new active one\n"); in bond_change_active_slave()
1316 if (bond_uses_primary(bond)) in bond_change_active_slave()
1317 bond_hw_addr_swap(bond, new_active, old_active); in bond_change_active_slave()
1319 if (bond_is_lb(bond)) { in bond_change_active_slave()
1320 bond_alb_handle_active_change(bond, new_active); in bond_change_active_slave()
1328 rcu_assign_pointer(bond->curr_active_slave, new_active); in bond_change_active_slave()
1331 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { in bond_change_active_slave()
1342 if (bond->params.fail_over_mac) in bond_change_active_slave()
1343 bond_do_fail_over_mac(bond, new_active, in bond_change_active_slave()
1346 if (netif_running(bond->dev)) { in bond_change_active_slave()
1347 bond->send_peer_notif = in bond_change_active_slave()
1348 bond->params.num_peer_notif * in bond_change_active_slave()
1349 max(1, bond->params.peer_notif_delay); in bond_change_active_slave()
1351 bond_should_notify_peers(bond); in bond_change_active_slave()
1354 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); in bond_change_active_slave()
1356 bond->send_peer_notif--; in bond_change_active_slave()
1358 bond->dev); in bond_change_active_slave()
1364 bond_ipsec_add_sa_all(bond); in bond_change_active_slave()
1369 * resend only if bond is brought up with the affected in bond_change_active_slave()
1372 if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && in bond_change_active_slave()
1373 ((bond_uses_primary(bond) && new_active) || in bond_change_active_slave()
1374 BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) { in bond_change_active_slave()
1375 bond->igmp_retrans = bond->params.resend_igmp; in bond_change_active_slave()
1376 queue_delayed_work(bond->wq, &bond->mcast_work, 1); in bond_change_active_slave()
1382 * @bond: our bonding struct
1391 void bond_select_active_slave(struct bonding *bond) in bond_select_active_slave() argument
1398 best_slave = bond_find_best_slave(bond); in bond_select_active_slave()
1399 if (best_slave != rtnl_dereference(bond->curr_active_slave)) { in bond_select_active_slave()
1400 bond_change_active_slave(bond, best_slave); in bond_select_active_slave()
1401 rv = bond_set_carrier(bond); in bond_select_active_slave()
1405 if (netif_carrier_ok(bond->dev)) in bond_select_active_slave()
1406 netdev_info(bond->dev, "active interface up!\n"); in bond_select_active_slave()
1408 netdev_info(bond->dev, "now running without any active interface!\n"); in bond_select_active_slave()
1446 struct bonding *bond = netdev_priv(bond_dev); in bond_poll_controller() local
1451 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_poll_controller()
1452 if (bond_3ad_get_active_agg_info(bond, &ad_info)) in bond_poll_controller()
1455 bond_for_each_slave_rcu(bond, slave, iter) { in bond_poll_controller()
1459 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_poll_controller()
1474 struct bonding *bond = netdev_priv(bond_dev); in bond_netpoll_cleanup() local
1478 bond_for_each_slave(bond, slave, iter) in bond_netpoll_cleanup()
1485 struct bonding *bond = netdev_priv(dev); in bond_netpoll_setup() local
1490 bond_for_each_slave(bond, slave, iter) { in bond_netpoll_setup()
1517 struct bonding *bond = netdev_priv(dev); in bond_fix_features() local
1525 bond_for_each_slave(bond, slave, iter) { in bond_fix_features()
1550 static void bond_compute_features(struct bonding *bond) in bond_compute_features() argument
1561 struct net_device *bond_dev = bond->dev; in bond_compute_features()
1568 if (!bond_has_slaves(bond)) in bond_compute_features()
1574 bond_for_each_slave(bond, slave, iter) { in bond_compute_features()
1656 struct bonding *bond) in bond_should_deliver_exact_match() argument
1659 if (BOND_MODE(bond) == BOND_MODE_ALB && in bond_should_deliver_exact_match()
1672 struct bonding *bond; in bond_handle_frame() local
1684 bond = slave->bond; in bond_handle_frame()
1686 recv_probe = READ_ONCE(bond->recv_probe); in bond_handle_frame()
1688 ret = recv_probe(skb, bond, slave); in bond_handle_frame()
1706 if (bond_should_deliver_exact_match(skb, slave, bond)) { in bond_handle_frame()
1712 skb->dev = bond->dev; in bond_handle_frame()
1714 if (BOND_MODE(bond) == BOND_MODE_ALB && in bond_handle_frame()
1715 netif_is_bridge_port(bond->dev) && in bond_handle_frame()
1723 bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, in bond_handle_frame()
1724 bond->dev->addr_len); in bond_handle_frame()
1730 static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) in bond_lag_tx_type() argument
1732 switch (BOND_MODE(bond)) { in bond_lag_tx_type()
1747 static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond, in bond_lag_hash_type() argument
1753 switch (bond->params.xmit_policy) { in bond_lag_hash_type()
1771 static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave, in bond_master_upper_dev_link() argument
1778 type = bond_lag_tx_type(bond); in bond_master_upper_dev_link()
1780 lag_upper_info.hash_type = bond_lag_hash_type(bond, type); in bond_master_upper_dev_link()
1782 err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave, in bond_master_upper_dev_link()
1791 static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave) in bond_upper_dev_unlink() argument
1793 netdev_upper_dev_unlink(slave->dev, bond->dev); in bond_upper_dev_unlink()
1800 struct bonding *bond = bond_get_bond_by_slave(slave); in slave_kobj_release() local
1803 if (BOND_MODE(bond) == BOND_MODE_8023AD) in slave_kobj_release()
1828 static struct slave *bond_alloc_slave(struct bonding *bond, in bond_alloc_slave() argument
1837 slave->bond = bond; in bond_alloc_slave()
1844 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_alloc_slave()
1856 static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) in bond_fill_ifbond() argument
1858 info->bond_mode = BOND_MODE(bond); in bond_fill_ifbond()
1859 info->miimon = bond->params.miimon; in bond_fill_ifbond()
1860 info->num_slaves = bond->slave_cnt; in bond_fill_ifbond()
1880 bond_fill_ifbond(slave->bond, &binfo.master); in bond_netdev_notify_work()
1884 queue_delayed_work(slave->bond->wq, &slave->notify_work, 1); in bond_netdev_notify_work()
1890 queue_delayed_work(slave->bond->wq, &slave->notify_work, 0); in bond_queue_slave_event()
1917 /* The bonding driver uses ether_setup() to convert a master bond device
1933 struct bonding *bond = netdev_priv(bond_dev); in bond_xdp_set_features() local
1940 if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) { in bond_xdp_set_features()
1945 bond_for_each_slave(bond, slave, iter) in bond_xdp_set_features()
1953 /* enslave device <slave> to bond device <master> */
1957 struct bonding *bond = netdev_priv(bond_dev); in bond_enslave() local
1971 if (!bond->params.use_carrier && in bond_enslave()
1985 BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself."); in bond_enslave()
1995 "Can not enslave VLAN challenged device to VLAN enabled bond"); in bond_enslave()
1998 …, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n"); in bond_enslave()
2022 * bond ether type mutual exclusion - don't allow slaves of dissimilar in bond_enslave()
2023 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond in bond_enslave()
2025 if (!bond_has_slaves(bond)) { in bond_enslave()
2057 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in bond_enslave()
2067 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && in bond_enslave()
2068 bond->params.fail_over_mac != BOND_FOM_ACTIVE) { in bond_enslave()
2069 if (!bond_has_slaves(bond)) { in bond_enslave()
2070 bond->params.fail_over_mac = BOND_FOM_ACTIVE; in bond_enslave()
2086 if (!bond_has_slaves(bond) && in bond_enslave()
2087 bond->dev->addr_assign_type == NET_ADDR_RANDOM) { in bond_enslave()
2088 res = bond_set_dev_addr(bond->dev, slave_dev); in bond_enslave()
2093 new_slave = bond_alloc_slave(bond, slave_dev); in bond_enslave()
2104 /* Save slave's original mtu and then set it to match the bond */ in bond_enslave()
2106 res = dev_set_mtu(slave_dev, bond->dev->mtu); in bond_enslave()
2119 if (!bond->params.fail_over_mac || in bond_enslave()
2120 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in bond_enslave()
2148 if (bond_is_lb(bond)) { in bond_enslave()
2152 res = bond_alb_init_slave(bond, new_slave); in bond_enslave()
2159 slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n"); in bond_enslave()
2163 prev_slave = bond_last_slave(bond); in bond_enslave()
2169 bond_needs_speed_duplex(bond)) in bond_enslave()
2173 (msecs_to_jiffies(bond->params.arp_interval) + 1); in bond_enslave()
2179 if (bond->params.miimon && !bond->params.use_carrier) { in bond_enslave()
2180 link_reporting = bond_check_dev_link(bond, slave_dev, 1); in bond_enslave()
2182 if ((link_reporting == -1) && !bond->params.arp_interval) { in bond_enslave()
2200 if (bond->params.miimon) { in bond_enslave()
2201 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { in bond_enslave()
2202 if (bond->params.updelay) { in bond_enslave()
2206 new_slave->delay = bond->params.updelay; in bond_enslave()
2216 } else if (bond->params.arp_interval) { in bond_enslave()
2232 if (bond_uses_primary(bond) && bond->params.primary[0]) { in bond_enslave()
2234 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { in bond_enslave()
2235 rcu_assign_pointer(bond->primary_slave, new_slave); in bond_enslave()
2236 bond->force_primary = true; in bond_enslave()
2240 switch (BOND_MODE(bond)) { in bond_enslave()
2255 * can be called only after the mac address of the bond is set in bond_enslave()
2257 bond_3ad_initialize(bond); in bond_enslave()
2277 * anyway (it holds no special properties of the bond device), in bond_enslave()
2280 if (!rcu_access_pointer(bond->curr_active_slave) && in bond_enslave()
2282 rcu_assign_pointer(bond->curr_active_slave, new_slave); in bond_enslave()
2288 if (bond->dev->npinfo) { in bond_enslave()
2307 res = bond_master_upper_dev_link(bond, new_slave, extack); in bond_enslave()
2324 if (!bond_uses_primary(bond)) { in bond_enslave()
2348 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_enslave()
2353 bond->slave_cnt++; in bond_enslave()
2354 bond_compute_features(bond); in bond_enslave()
2355 bond_set_carrier(bond); in bond_enslave()
2360 bond_slave_ns_maddrs_add(bond, new_slave); in bond_enslave()
2362 if (bond_uses_primary(bond)) { in bond_enslave()
2364 bond_select_active_slave(bond); in bond_enslave()
2368 if (bond_mode_can_use_xmit_hash(bond)) in bond_enslave()
2369 bond_update_slave_arr(bond, NULL); in bond_enslave()
2373 if (bond->xdp_prog) { in bond_enslave()
2379 } else if (bond->xdp_prog) { in bond_enslave()
2383 .prog = bond->xdp_prog, in bond_enslave()
2400 if (bond->xdp_prog) in bond_enslave()
2401 bpf_prog_inc(bond->xdp_prog); in bond_enslave()
2419 bond_upper_dev_unlink(bond, new_slave); in bond_enslave()
2426 if (rcu_access_pointer(bond->primary_slave) == new_slave) in bond_enslave()
2427 RCU_INIT_POINTER(bond->primary_slave, NULL); in bond_enslave()
2428 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { in bond_enslave()
2430 bond_change_active_slave(bond, NULL); in bond_enslave()
2431 bond_select_active_slave(bond); in bond_enslave()
2445 if (!bond->params.fail_over_mac || in bond_enslave()
2446 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in bond_enslave()
2448 * MAC if this slave's MAC is in use by the bond, or at in bond_enslave()
2465 if (!bond_has_slaves(bond)) { in bond_enslave()
2478 /* Try to release the slave device <slave> from the bond device <master>
2481 * while destroying a bond interface and all slaves are being released.
2493 struct bonding *bond = netdev_priv(bond_dev); in __bond_release_one() local
2508 slave = bond_get_slave_by_dev(bond, slave_dev); in __bond_release_one()
2510 /* not a slave of this bond */ in __bond_release_one()
2521 bond_get_stats(bond->dev, &bond->bond_stats); in __bond_release_one()
2523 if (bond->xdp_prog) { in __bond_release_one()
2539 if (BOND_MODE(bond) == BOND_MODE_8023AD) in __bond_release_one()
2542 bond_upper_dev_unlink(bond, slave); in __bond_release_one()
2544 if (bond_mode_can_use_xmit_hash(bond)) in __bond_release_one()
2545 bond_update_slave_arr(bond, slave); in __bond_release_one()
2550 oldcurrent = rcu_access_pointer(bond->curr_active_slave); in __bond_release_one()
2552 RCU_INIT_POINTER(bond->current_arp_slave, NULL); in __bond_release_one()
2554 if (!all && (!bond->params.fail_over_mac || in __bond_release_one()
2555 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { in __bond_release_one()
2557 bond_has_slaves(bond)) in __bond_release_one()
2558 …slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - s… in __bond_release_one()
2562 if (rtnl_dereference(bond->primary_slave) == slave) in __bond_release_one()
2563 RCU_INIT_POINTER(bond->primary_slave, NULL); in __bond_release_one()
2566 bond_change_active_slave(bond, NULL); in __bond_release_one()
2572 bond_slave_ns_maddrs_del(bond, slave); in __bond_release_one()
2574 if (bond_is_lb(bond)) { in __bond_release_one()
2580 bond_alb_deinit_slave(bond, slave); in __bond_release_one()
2584 RCU_INIT_POINTER(bond->curr_active_slave, NULL); in __bond_release_one()
2590 bond_select_active_slave(bond); in __bond_release_one()
2593 bond_set_carrier(bond); in __bond_release_one()
2594 if (!bond_has_slaves(bond)) in __bond_release_one()
2599 bond->slave_cnt--; in __bond_release_one()
2601 if (!bond_has_slaves(bond)) { in __bond_release_one()
2602 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); in __bond_release_one()
2603 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); in __bond_release_one()
2606 bond_compute_features(bond); in __bond_release_one()
2609 …slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n… in __bond_release_one()
2616 if (!bond_uses_primary(bond)) { in __bond_release_one()
2642 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || in __bond_release_one()
2643 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in __bond_release_one()
2671 /* First release a slave and then destroy the bond if no more slaves are left.
2677 struct bonding *bond = netdev_priv(bond_dev); in bond_release_and_destroy() local
2681 if (ret == 0 && !bond_has_slaves(bond) && in bond_release_and_destroy()
2684 netdev_info(bond_dev, "Destroying bond\n"); in bond_release_and_destroy()
2685 bond_remove_proc_entry(bond); in bond_release_and_destroy()
2693 struct bonding *bond = netdev_priv(bond_dev); in bond_info_query() local
2695 bond_fill_ifbond(bond, info); in bond_info_query()
2700 struct bonding *bond = netdev_priv(bond_dev); in bond_slave_info_query() local
2705 bond_for_each_slave(bond, slave, iter) { in bond_slave_info_query()
2719 static int bond_miimon_inspect(struct bonding *bond) in bond_miimon_inspect() argument
2726 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { in bond_miimon_inspect()
2727 ignore_updelay = !rcu_dereference(bond->curr_active_slave); in bond_miimon_inspect()
2731 usable_slaves = rcu_dereference(bond->usable_slaves); in bond_miimon_inspect()
2737 bond_for_each_slave_rcu(bond, slave, iter) { in bond_miimon_inspect()
2740 link_state = bond_check_dev_link(bond, slave->dev, 0); in bond_miimon_inspect()
2749 slave->delay = bond->params.downdelay; in bond_miimon_inspect()
2751 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n", in bond_miimon_inspect()
2752 (BOND_MODE(bond) == in bond_miimon_inspect()
2756 bond->params.downdelay * bond->params.miimon); in bond_miimon_inspect()
2765 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n", in bond_miimon_inspect()
2766 (bond->params.downdelay - slave->delay) * in bond_miimon_inspect()
2767 bond->params.miimon); in bond_miimon_inspect()
2787 slave->delay = bond->params.updelay; in bond_miimon_inspect()
2790 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n", in bond_miimon_inspect()
2792 bond->params.updelay * in bond_miimon_inspect()
2793 bond->params.miimon); in bond_miimon_inspect()
2800 slave_info(bond->dev, slave->dev, "link status down again after %d ms\n", in bond_miimon_inspect()
2801 (bond->params.updelay - slave->delay) * in bond_miimon_inspect()
2802 bond->params.miimon); in bond_miimon_inspect()
2825 static void bond_miimon_link_change(struct bonding *bond, in bond_miimon_link_change() argument
2829 switch (BOND_MODE(bond)) { in bond_miimon_link_change()
2835 bond_alb_handle_link_change(bond, slave, link); in bond_miimon_link_change()
2838 bond_update_slave_arr(bond, NULL); in bond_miimon_link_change()
2843 static void bond_miimon_commit(struct bonding *bond) in bond_miimon_commit() argument
2851 bond_for_each_slave(bond, slave, iter) { in bond_miimon_commit()
2860 if (BOND_MODE(bond) == BOND_MODE_8023AD && in bond_miimon_commit()
2867 bond_needs_speed_duplex(bond)) { in bond_miimon_commit()
2870 slave_warn(bond->dev, slave->dev, in bond_miimon_commit()
2878 primary = rtnl_dereference(bond->primary_slave); in bond_miimon_commit()
2879 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_miimon_commit()
2882 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in bond_miimon_commit()
2887 slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n", in bond_miimon_commit()
2891 bond_miimon_link_change(bond, slave, BOND_LINK_UP); in bond_miimon_commit()
2893 active = rtnl_dereference(bond->curr_active_slave); in bond_miimon_commit()
2906 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || in bond_miimon_commit()
2907 BOND_MODE(bond) == BOND_MODE_8023AD) in bond_miimon_commit()
2911 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); in bond_miimon_commit()
2913 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); in bond_miimon_commit()
2915 if (slave == rcu_access_pointer(bond->curr_active_slave)) in bond_miimon_commit()
2921 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", in bond_miimon_commit()
2931 bond_select_active_slave(bond); in bond_miimon_commit()
2935 bond_set_carrier(bond); in bond_miimon_commit()
2947 struct bonding *bond = container_of(work, struct bonding, in bond_mii_monitor() local
2955 delay = msecs_to_jiffies(bond->params.miimon); in bond_mii_monitor()
2957 if (!bond_has_slaves(bond)) in bond_mii_monitor()
2961 should_notify_peers = bond_should_notify_peers(bond); in bond_mii_monitor()
2962 commit = !!bond_miimon_inspect(bond); in bond_mii_monitor()
2963 if (bond->send_peer_notif) { in bond_mii_monitor()
2966 bond->send_peer_notif--; in bond_mii_monitor()
2981 bond_for_each_slave(bond, slave, iter) { in bond_mii_monitor()
2984 bond_miimon_commit(bond); in bond_mii_monitor()
2990 if (bond->params.miimon) in bond_mii_monitor()
2991 queue_delayed_work(bond->wq, &bond->mii_work, delay); in bond_mii_monitor()
2996 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); in bond_mii_monitor()
3009 static bool bond_has_this_ip(struct bonding *bond, __be32 ip) in bond_has_this_ip() argument
3016 if (ip == bond_confirm_addr(bond->dev, 0, ip)) in bond_has_this_ip()
3020 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv)) in bond_has_this_ip()
3032 struct net_device *bond_dev = slave->bond->dev; in bond_handle_vlan()
3077 struct net_device *bond_dev = slave->bond->dev; in bond_arp_send()
3140 static void bond_arp_send_all(struct bonding *bond, struct slave *slave) in bond_arp_send_all() argument
3144 __be32 *targets = bond->params.arp_targets, addr; in bond_arp_send_all()
3148 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n", in bond_arp_send_all()
3153 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 0, 0, in bond_arp_send_all()
3159 if (bond->params.arp_validate) in bond_arp_send_all()
3161 bond->dev->name, in bond_arp_send_all()
3168 /* bond device itself */ in bond_arp_send_all()
3169 if (rt->dst.dev == bond->dev) in bond_arp_send_all()
3173 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0); in bond_arp_send_all()
3180 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n", in bond_arp_send_all()
3194 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) in bond_validate_arp() argument
3198 if (!sip || !bond_has_this_ip(bond, tip)) { in bond_validate_arp()
3199 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n", in bond_validate_arp()
3204 i = bond_get_targets_ip(bond->params.arp_targets, sip); in bond_validate_arp()
3206 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n", in bond_validate_arp()
3214 static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, in bond_arp_rcv() argument
3223 alen = arp_hdr_len(bond->dev); in bond_arp_rcv()
3233 if (arp->ar_hln != bond->dev->addr_len || in bond_arp_rcv()
3242 arp_ptr += bond->dev->addr_len; in bond_arp_rcv()
3244 arp_ptr += 4 + bond->dev->addr_len; in bond_arp_rcv()
3247 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n", in bond_arp_rcv()
3249 bond->params.arp_validate, slave_do_arp_validate(bond, slave), in bond_arp_rcv()
3252 curr_active_slave = rcu_dereference(bond->curr_active_slave); in bond_arp_rcv()
3253 curr_arp_slave = rcu_dereference(bond->current_arp_slave); in bond_arp_rcv()
3279 bond_validate_arp(bond, slave, sip, tip); in bond_arp_rcv()
3281 time_after(slave_last_rx(bond, curr_active_slave), in bond_arp_rcv()
3283 bond_validate_arp(bond, slave, tip, sip); in bond_arp_rcv()
3285 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) in bond_arp_rcv()
3286 bond_validate_arp(bond, slave, sip, tip); in bond_arp_rcv()
3298 struct net_device *bond_dev = slave->bond->dev; in bond_ns_send()
3319 static void bond_ns_send_all(struct bonding *bond, struct slave *slave) in bond_ns_send_all() argument
3321 struct in6_addr *targets = bond->params.ns_targets; in bond_ns_send_all()
3329 slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n", in bond_ns_send_all()
3336 fl6.flowi6_oif = bond->dev->ifindex; in bond_ns_send_all()
3338 dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6); in bond_ns_send_all()
3344 if (bond->params.arp_validate) in bond_ns_send_all()
3346 bond->dev->name, in bond_ns_send_all()
3352 /* bond device itself */ in bond_ns_send_all()
3353 if (dst->dev == bond->dev) in bond_ns_send_all()
3357 tags = bond_verify_device_path(bond->dev, dst->dev, 0); in bond_ns_send_all()
3364 slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n", in bond_ns_send_all()
3389 static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr) in bond_has_this_ip6() argument
3396 if (bond_confirm_addr6(bond->dev, &priv)) in bond_has_this_ip6()
3400 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv)) in bond_has_this_ip6()
3407 static void bond_validate_na(struct bonding *bond, struct slave *slave, in bond_validate_na() argument
3415 * exist on bond interface. in bond_validate_na()
3419 !bond_has_this_ip6(bond, daddr))) { in bond_validate_na()
3420 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n", in bond_validate_na()
3425 i = bond_get_targets_ip6(bond->params.ns_targets, saddr); in bond_validate_na()
3427 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n", in bond_validate_na()
3435 static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, in bond_na_rcv() argument
3458 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n", in bond_na_rcv()
3460 bond->params.arp_validate, slave_do_arp_validate(bond, slave), in bond_na_rcv()
3463 curr_active_slave = rcu_dereference(bond->curr_active_slave); in bond_na_rcv()
3464 curr_arp_slave = rcu_dereference(bond->current_arp_slave); in bond_na_rcv()
3470 bond_validate_na(bond, slave, saddr, daddr); in bond_na_rcv()
3472 time_after(slave_last_rx(bond, curr_active_slave), in bond_na_rcv()
3474 bond_validate_na(bond, slave, daddr, saddr); in bond_na_rcv()
3476 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) in bond_na_rcv()
3477 bond_validate_na(bond, slave, saddr, daddr); in bond_na_rcv()
3484 int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, in bond_rcv_validate() argument
3492 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n", in bond_rcv_validate()
3496 if (!slave_do_arp_validate(bond, slave)) { in bond_rcv_validate()
3497 if ((slave_do_arp_validate_only(bond) && is_arp) || in bond_rcv_validate()
3499 (slave_do_arp_validate_only(bond) && is_ipv6) || in bond_rcv_validate()
3501 !slave_do_arp_validate_only(bond)) in bond_rcv_validate()
3505 return bond_arp_rcv(skb, bond, slave); in bond_rcv_validate()
3508 return bond_na_rcv(skb, bond, slave); in bond_rcv_validate()
3515 static void bond_send_validate(struct bonding *bond, struct slave *slave) in bond_send_validate() argument
3517 bond_arp_send_all(bond, slave); in bond_send_validate()
3519 bond_ns_send_all(bond, slave); in bond_send_validate()
3527 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, in bond_time_in_interval() argument
3530 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); in bond_time_in_interval()
3543 static void bond_loadbalance_arp_mon(struct bonding *bond) in bond_loadbalance_arp_mon() argument
3549 if (!bond_has_slaves(bond)) in bond_loadbalance_arp_mon()
3554 oldcurrent = rcu_dereference(bond->curr_active_slave); in bond_loadbalance_arp_mon()
3563 bond_for_each_slave_rcu(bond, slave, iter) { in bond_loadbalance_arp_mon()
3569 if (bond_time_in_interval(bond, last_tx, 1) && in bond_loadbalance_arp_mon()
3570 bond_time_in_interval(bond, slave->last_rx, 1)) { in bond_loadbalance_arp_mon()
3581 slave_info(bond->dev, slave->dev, "link status definitely up\n"); in bond_loadbalance_arp_mon()
3584 slave_info(bond->dev, slave->dev, "interface is now up\n"); in bond_loadbalance_arp_mon()
3594 if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || in bond_loadbalance_arp_mon()
3595 !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) { in bond_loadbalance_arp_mon()
3603 slave_info(bond->dev, slave->dev, "interface is now down\n"); in bond_loadbalance_arp_mon()
3618 bond_send_validate(bond, slave); in bond_loadbalance_arp_mon()
3627 bond_for_each_slave(bond, slave, iter) { in bond_loadbalance_arp_mon()
3633 bond_slave_state_change(bond); in bond_loadbalance_arp_mon()
3634 if (BOND_MODE(bond) == BOND_MODE_XOR) in bond_loadbalance_arp_mon()
3635 bond_update_slave_arr(bond, NULL); in bond_loadbalance_arp_mon()
3639 bond_select_active_slave(bond); in bond_loadbalance_arp_mon()
3646 if (bond->params.arp_interval) in bond_loadbalance_arp_mon()
3647 queue_delayed_work(bond->wq, &bond->arp_work, in bond_loadbalance_arp_mon()
3648 msecs_to_jiffies(bond->params.arp_interval)); in bond_loadbalance_arp_mon()
3658 static int bond_ab_arp_inspect(struct bonding *bond) in bond_ab_arp_inspect() argument
3665 bond_for_each_slave_rcu(bond, slave, iter) { in bond_ab_arp_inspect()
3667 last_rx = slave_last_rx(bond, slave); in bond_ab_arp_inspect()
3670 if (bond_time_in_interval(bond, last_rx, 1)) { in bond_ab_arp_inspect()
3684 if (bond_time_in_interval(bond, slave->last_link_up, 2)) in bond_ab_arp_inspect()
3690 * - the bond has an IP address in bond_ab_arp_inspect()
3700 !rcu_access_pointer(bond->current_arp_slave) && in bond_ab_arp_inspect()
3701 !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) { in bond_ab_arp_inspect()
3709 * the bond has an IP address) in bond_ab_arp_inspect()
3713 (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || in bond_ab_arp_inspect()
3714 !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) { in bond_ab_arp_inspect()
3728 static void bond_ab_arp_commit(struct bonding *bond) in bond_ab_arp_commit() argument
3735 bond_for_each_slave(bond, slave, iter) { in bond_ab_arp_commit()
3742 if (rtnl_dereference(bond->curr_active_slave) != slave || in bond_ab_arp_commit()
3743 (!rtnl_dereference(bond->curr_active_slave) && in bond_ab_arp_commit()
3744 bond_time_in_interval(bond, last_tx, 1))) { in bond_ab_arp_commit()
3747 current_arp_slave = rtnl_dereference(bond->current_arp_slave); in bond_ab_arp_commit()
3754 RCU_INIT_POINTER(bond->current_arp_slave, NULL); in bond_ab_arp_commit()
3757 slave_info(bond->dev, slave->dev, "link status definitely up\n"); in bond_ab_arp_commit()
3759 if (!rtnl_dereference(bond->curr_active_slave) || in bond_ab_arp_commit()
3760 slave == rtnl_dereference(bond->primary_slave) || in bond_ab_arp_commit()
3761 slave->prio > rtnl_dereference(bond->curr_active_slave)->prio) in bond_ab_arp_commit()
3777 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); in bond_ab_arp_commit()
3779 if (slave == rtnl_dereference(bond->curr_active_slave)) { in bond_ab_arp_commit()
3780 RCU_INIT_POINTER(bond->current_arp_slave, NULL); in bond_ab_arp_commit()
3795 if (rtnl_dereference(bond->curr_active_slave)) in bond_ab_arp_commit()
3796 RCU_INIT_POINTER(bond->current_arp_slave, NULL); in bond_ab_arp_commit()
3800 slave_err(bond->dev, slave->dev, in bond_ab_arp_commit()
3809 bond_select_active_slave(bond); in bond_ab_arp_commit()
3813 bond_set_carrier(bond); in bond_ab_arp_commit()
3820 static bool bond_ab_arp_probe(struct bonding *bond) in bond_ab_arp_probe() argument
3823 *curr_arp_slave = rcu_dereference(bond->current_arp_slave), in bond_ab_arp_probe()
3824 *curr_active_slave = rcu_dereference(bond->curr_active_slave); in bond_ab_arp_probe()
3830 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n", in bond_ab_arp_probe()
3835 bond_send_validate(bond, curr_active_slave); in bond_ab_arp_probe()
3845 curr_arp_slave = bond_first_slave_rcu(bond); in bond_ab_arp_probe()
3850 bond_for_each_slave_rcu(bond, slave, iter) { in bond_ab_arp_probe()
3872 slave_info(bond->dev, slave->dev, "backup interface is now down\n"); in bond_ab_arp_probe()
3887 bond_send_validate(bond, new_slave); in bond_ab_arp_probe()
3889 rcu_assign_pointer(bond->current_arp_slave, new_slave); in bond_ab_arp_probe()
3892 bond_for_each_slave_rcu(bond, slave, iter) { in bond_ab_arp_probe()
3901 static void bond_activebackup_arp_mon(struct bonding *bond) in bond_activebackup_arp_mon() argument
3907 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); in bond_activebackup_arp_mon()
3909 if (!bond_has_slaves(bond)) in bond_activebackup_arp_mon()
3914 should_notify_peers = bond_should_notify_peers(bond); in bond_activebackup_arp_mon()
3916 if (bond_ab_arp_inspect(bond)) { in bond_activebackup_arp_mon()
3926 bond_ab_arp_commit(bond); in bond_activebackup_arp_mon()
3932 should_notify_rtnl = bond_ab_arp_probe(bond); in bond_activebackup_arp_mon()
3936 if (bond->params.arp_interval) in bond_activebackup_arp_mon()
3937 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); in bond_activebackup_arp_mon()
3944 bond->send_peer_notif--; in bond_activebackup_arp_mon()
3946 bond->dev); in bond_activebackup_arp_mon()
3949 bond_slave_state_notify(bond); in bond_activebackup_arp_mon()
3950 bond_slave_link_notify(bond); in bond_activebackup_arp_mon()
3959 struct bonding *bond = container_of(work, struct bonding, in bond_arp_monitor() local
3962 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) in bond_arp_monitor()
3963 bond_activebackup_arp_mon(bond); in bond_arp_monitor()
3965 bond_loadbalance_arp_mon(bond); in bond_arp_monitor()
3971 static int bond_event_changename(struct bonding *bond) in bond_event_changename() argument
3973 bond_remove_proc_entry(bond); in bond_event_changename()
3974 bond_create_proc_entry(bond); in bond_event_changename()
3976 bond_debug_reregister(bond); in bond_event_changename()
4011 struct bonding *bond; in bond_slave_netdev_event() local
4023 bond_dev = slave->bond->dev; in bond_slave_netdev_event()
4024 bond = slave->bond; in bond_slave_netdev_event()
4025 primary = rtnl_dereference(bond->primary_slave); in bond_slave_netdev_event()
4046 BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_slave_netdev_event()
4053 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_slave_netdev_event()
4065 if (bond_mode_can_use_xmit_hash(bond)) in bond_slave_netdev_event()
4066 bond_update_slave_arr(bond, NULL); in bond_slave_netdev_event()
4071 * an active-backup bond, slaves need in bond_slave_netdev_event()
4083 if (!bond_uses_primary(bond) || in bond_slave_netdev_event()
4084 !bond->params.primary[0]) in bond_slave_netdev_event()
4089 RCU_INIT_POINTER(bond->primary_slave, NULL); in bond_slave_netdev_event()
4090 } else if (!strcmp(slave_dev->name, bond->params.primary)) { in bond_slave_netdev_event()
4092 rcu_assign_pointer(bond->primary_slave, slave); in bond_slave_netdev_event()
4097 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n", in bond_slave_netdev_event()
4101 bond_select_active_slave(bond); in bond_slave_netdev_event()
4105 if (!bond->notifier_ctx) { in bond_slave_netdev_event()
4106 bond->notifier_ctx = true; in bond_slave_netdev_event()
4107 bond_compute_features(bond); in bond_slave_netdev_event()
4108 bond->notifier_ctx = false; in bond_slave_netdev_event()
4113 call_netdevice_notifiers(event, slave->bond->dev); in bond_slave_netdev_event()
4249 /* Extract the appropriate headers based on bond's xmit policy */
4250 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data, in bond_flow_dissect() argument
4253 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34; in bond_flow_dissect()
4256 switch (bond->params.xmit_policy) { in bond_flow_dissect()
4314 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data, in __bond_xmit_hash() argument
4320 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) in __bond_xmit_hash()
4323 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || in __bond_xmit_hash()
4324 !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow)) in __bond_xmit_hash()
4327 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || in __bond_xmit_hash()
4328 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { in __bond_xmit_hash()
4337 return bond_ip_hash(hash, &flow, bond->params.xmit_policy); in __bond_xmit_hash()
4342 * @bond: bonding device
4348 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) in bond_xmit_hash() argument
4350 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && in bond_xmit_hash()
4354 return __bond_xmit_hash(bond, skb, skb->data, skb->protocol, in bond_xmit_hash()
4361 * @bond: bonding device
4366 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp) in bond_xmit_hash_xdp() argument
4375 return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0, in bond_xmit_hash_xdp()
4381 void bond_work_init_all(struct bonding *bond) in bond_work_init_all() argument
4383 INIT_DELAYED_WORK(&bond->mcast_work, in bond_work_init_all()
4385 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); in bond_work_init_all()
4386 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); in bond_work_init_all()
4387 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor); in bond_work_init_all()
4388 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); in bond_work_init_all()
4389 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); in bond_work_init_all()
4392 static void bond_work_cancel_all(struct bonding *bond) in bond_work_cancel_all() argument
4394 cancel_delayed_work_sync(&bond->mii_work); in bond_work_cancel_all()
4395 cancel_delayed_work_sync(&bond->arp_work); in bond_work_cancel_all()
4396 cancel_delayed_work_sync(&bond->alb_work); in bond_work_cancel_all()
4397 cancel_delayed_work_sync(&bond->ad_work); in bond_work_cancel_all()
4398 cancel_delayed_work_sync(&bond->mcast_work); in bond_work_cancel_all()
4399 cancel_delayed_work_sync(&bond->slave_arr_work); in bond_work_cancel_all()
4404 struct bonding *bond = netdev_priv(bond_dev); in bond_open() local
4408 if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) { in bond_open()
4409 bond->rr_tx_counter = alloc_percpu(u32); in bond_open()
4410 if (!bond->rr_tx_counter) in bond_open()
4415 if (bond_has_slaves(bond)) { in bond_open()
4416 bond_for_each_slave(bond, slave, iter) { in bond_open()
4417 if (bond_uses_primary(bond) && in bond_open()
4418 slave != rcu_access_pointer(bond->curr_active_slave)) { in bond_open()
4421 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) { in bond_open()
4428 if (bond_is_lb(bond)) { in bond_open()
4432 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB))) in bond_open()
4434 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB) in bond_open()
4435 queue_delayed_work(bond->wq, &bond->alb_work, 0); in bond_open()
4438 if (bond->params.miimon) /* link check interval, in milliseconds. */ in bond_open()
4439 queue_delayed_work(bond->wq, &bond->mii_work, 0); in bond_open()
4441 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ in bond_open()
4442 queue_delayed_work(bond->wq, &bond->arp_work, 0); in bond_open()
4443 bond->recv_probe = bond_rcv_validate; in bond_open()
4446 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_open()
4447 queue_delayed_work(bond->wq, &bond->ad_work, 0); in bond_open()
4449 bond->recv_probe = bond_3ad_lacpdu_recv; in bond_open()
4450 bond_3ad_initiate_agg_selection(bond, 1); in bond_open()
4452 bond_for_each_slave(bond, slave, iter) in bond_open()
4456 if (bond_mode_can_use_xmit_hash(bond)) in bond_open()
4457 bond_update_slave_arr(bond, NULL); in bond_open()
4464 struct bonding *bond = netdev_priv(bond_dev); in bond_close() local
4467 bond_work_cancel_all(bond); in bond_close()
4468 bond->send_peer_notif = 0; in bond_close()
4469 if (bond_is_lb(bond)) in bond_close()
4470 bond_alb_deinitialize(bond); in bond_close()
4471 bond->recv_probe = NULL; in bond_close()
4473 if (bond_uses_primary(bond)) { in bond_close()
4475 slave = rcu_dereference(bond->curr_active_slave); in bond_close()
4482 bond_for_each_slave(bond, slave, iter) in bond_close()
4562 struct bonding *bond = netdev_priv(bond_dev); in bond_get_stats() local
4574 spin_lock_nested(&bond->stats_lock, nest_level); in bond_get_stats()
4575 memcpy(stats, &bond->bond_stats, sizeof(*stats)); in bond_get_stats()
4577 bond_for_each_slave_rcu(bond, slave, iter) { in bond_get_stats()
4587 memcpy(&bond->bond_stats, stats, sizeof(*stats)); in bond_get_stats()
4588 spin_unlock(&bond->stats_lock); in bond_get_stats()
4594 struct bonding *bond = netdev_priv(bond_dev); in bond_eth_ioctl() local
4617 if (netif_carrier_ok(bond->dev)) in bond_eth_ioctl()
4631 struct bonding *bond = netdev_priv(bond_dev); in bond_do_ioctl() local
4695 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE, in bond_do_ioctl()
4730 struct bonding *bond = netdev_priv(bond_dev); in bond_change_rx_flags() local
4733 bond_set_promiscuity(bond, in bond_change_rx_flags()
4737 bond_set_allmulti(bond, in bond_change_rx_flags()
4743 struct bonding *bond = netdev_priv(bond_dev); in bond_set_rx_mode() local
4748 if (bond_uses_primary(bond)) { in bond_set_rx_mode()
4749 slave = rcu_dereference(bond->curr_active_slave); in bond_set_rx_mode()
4755 bond_for_each_slave_rcu(bond, slave, iter) { in bond_set_rx_mode()
4765 struct bonding *bond = netdev_priv(n->dev); in bond_neigh_init() local
4772 slave = bond_first_slave_rcu(bond); in bond_neigh_init()
4821 struct bonding *bond = netdev_priv(bond_dev); in bond_change_mtu() local
4826 netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu); in bond_change_mtu()
4828 bond_for_each_slave(bond, slave, iter) { in bond_change_mtu()
4855 bond_for_each_slave(bond, rollback_slave, iter) { in bond_change_mtu()
4878 struct bonding *bond = netdev_priv(bond_dev); in bond_set_mac_address() local
4884 if (BOND_MODE(bond) == BOND_MODE_ALB) in bond_set_mac_address()
4888 netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond); in bond_set_mac_address()
4893 if (bond->params.fail_over_mac && in bond_set_mac_address()
4894 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) in bond_set_mac_address()
4900 bond_for_each_slave(bond, slave, iter) { in bond_set_mac_address()
4926 bond_for_each_slave(bond, rollback_slave, iter) { in bond_set_mac_address()
4945 * @bond: bonding device that is transmitting
4951 static struct slave *bond_get_slave_by_id(struct bonding *bond, in bond_get_slave_by_id() argument
4959 bond_for_each_slave_rcu(bond, slave, iter) { in bond_get_slave_by_id()
4968 bond_for_each_slave_rcu(bond, slave, iter) { in bond_get_slave_by_id()
4980 * @bond: bonding device to use
4986 static u32 bond_rr_gen_slave_id(struct bonding *bond) in bond_rr_gen_slave_id() argument
4990 int packets_per_slave = bond->params.packets_per_slave; in bond_rr_gen_slave_id()
4997 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); in bond_rr_gen_slave_id()
5001 bond->params.reciprocal_packets_per_slave; in bond_rr_gen_slave_id()
5002 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); in bond_rr_gen_slave_id()
5011 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond, in bond_xmit_roundrobin_slave_get() argument
5018 /* Start with the curr_active_slave that joined the bond as the in bond_xmit_roundrobin_slave_get()
5033 slave = rcu_dereference(bond->curr_active_slave); in bond_xmit_roundrobin_slave_get()
5036 return bond_get_slave_by_id(bond, 0); in bond_xmit_roundrobin_slave_get()
5041 slave_cnt = READ_ONCE(bond->slave_cnt); in bond_xmit_roundrobin_slave_get()
5043 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; in bond_xmit_roundrobin_slave_get()
5044 return bond_get_slave_by_id(bond, slave_id); in bond_xmit_roundrobin_slave_get()
5049 static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond, in bond_xdp_xmit_roundrobin_slave_get() argument
5074 slave = rcu_dereference(bond->curr_active_slave); in bond_xdp_xmit_roundrobin_slave_get()
5077 return bond_get_slave_by_id(bond, 0); in bond_xdp_xmit_roundrobin_slave_get()
5082 slave_cnt = READ_ONCE(bond->slave_cnt); in bond_xdp_xmit_roundrobin_slave_get()
5084 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; in bond_xdp_xmit_roundrobin_slave_get()
5085 return bond_get_slave_by_id(bond, slave_id); in bond_xdp_xmit_roundrobin_slave_get()
5093 struct bonding *bond = netdev_priv(bond_dev); in bond_xmit_roundrobin() local
5096 slave = bond_xmit_roundrobin_slave_get(bond, skb); in bond_xmit_roundrobin()
5098 return bond_dev_queue_xmit(bond, skb, slave->dev); in bond_xmit_roundrobin()
5103 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond) in bond_xmit_activebackup_slave_get() argument
5105 return rcu_dereference(bond->curr_active_slave); in bond_xmit_activebackup_slave_get()
5108 /* In active-backup mode, we know that bond->curr_active_slave is always valid if
5109 * the bond has a usable interface.
5114 struct bonding *bond = netdev_priv(bond_dev); in bond_xmit_activebackup() local
5117 slave = bond_xmit_activebackup_slave_get(bond); in bond_xmit_activebackup()
5119 return bond_dev_queue_xmit(bond, skb, slave->dev); in bond_xmit_activebackup()
5128 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay) in bond_slave_arr_work_rearm() argument
5130 queue_delayed_work(bond->wq, &bond->slave_arr_work, delay); in bond_slave_arr_work_rearm()
5136 struct bonding *bond = container_of(work, struct bonding, in bond_slave_arr_handler() local
5143 ret = bond_update_slave_arr(bond, NULL); in bond_slave_arr_handler()
5152 bond_slave_arr_work_rearm(bond, 1); in bond_slave_arr_handler()
5178 static void bond_set_slave_arr(struct bonding *bond, in bond_set_slave_arr() argument
5184 usable = rtnl_dereference(bond->usable_slaves); in bond_set_slave_arr()
5185 rcu_assign_pointer(bond->usable_slaves, usable_slaves); in bond_set_slave_arr()
5188 all = rtnl_dereference(bond->all_slaves); in bond_set_slave_arr()
5189 rcu_assign_pointer(bond->all_slaves, all_slaves); in bond_set_slave_arr()
5193 static void bond_reset_slave_arr(struct bonding *bond) in bond_reset_slave_arr() argument
5195 bond_set_slave_arr(bond, NULL, NULL); in bond_reset_slave_arr()
5206 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) in bond_update_slave_arr() argument
5217 bond->slave_cnt), GFP_KERNEL); in bond_update_slave_arr()
5219 bond->slave_cnt), GFP_KERNEL); in bond_update_slave_arr()
5224 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_update_slave_arr()
5227 spin_lock_bh(&bond->mode_lock); in bond_update_slave_arr()
5228 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { in bond_update_slave_arr()
5229 spin_unlock_bh(&bond->mode_lock); in bond_update_slave_arr()
5234 bond_reset_slave_arr(bond); in bond_update_slave_arr()
5237 spin_unlock_bh(&bond->mode_lock); in bond_update_slave_arr()
5240 bond_for_each_slave(bond, slave, iter) { in bond_update_slave_arr()
5245 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_update_slave_arr()
5255 slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n", in bond_update_slave_arr()
5261 bond_set_slave_arr(bond, usable_slaves, all_slaves); in bond_update_slave_arr()
5265 bond_skip_slave(rtnl_dereference(bond->all_slaves), in bond_update_slave_arr()
5267 bond_skip_slave(rtnl_dereference(bond->usable_slaves), in bond_update_slave_arr()
5276 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond, in bond_xmit_3ad_xor_slave_get() argument
5284 hash = bond_xmit_hash(bond, skb); in bond_xmit_3ad_xor_slave_get()
5293 static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond, in bond_xdp_xmit_3ad_xor_slave_get() argument
5300 hash = bond_xmit_hash_xdp(bond, xdp); in bond_xdp_xmit_3ad_xor_slave_get()
5301 slaves = rcu_dereference(bond->usable_slaves); in bond_xdp_xmit_3ad_xor_slave_get()
5316 struct bonding *bond = netdev_priv(dev); in bond_3ad_xor_xmit() local
5320 slaves = rcu_dereference(bond->usable_slaves); in bond_3ad_xor_xmit()
5321 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); in bond_3ad_xor_xmit()
5323 return bond_dev_queue_xmit(bond, skb, slave->dev); in bond_3ad_xor_xmit()
5332 struct bonding *bond = netdev_priv(bond_dev); in bond_xmit_broadcast() local
5338 bond_for_each_slave_rcu(bond, slave, iter) { in bond_xmit_broadcast()
5344 if (bond_is_last_slave(bond, slave)) { in bond_xmit_broadcast()
5356 if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK) in bond_xmit_broadcast()
5373 static inline int bond_slave_override(struct bonding *bond, in bond_slave_override() argument
5383 bond_for_each_slave_rcu(bond, slave, iter) { in bond_slave_override()
5387 bond_dev_queue_xmit(bond, skb, slave->dev); in bond_slave_override()
5424 struct bonding *bond = netdev_priv(master_dev); in bond_xmit_get_slave() local
5428 switch (BOND_MODE(bond)) { in bond_xmit_get_slave()
5430 slave = bond_xmit_roundrobin_slave_get(bond, skb); in bond_xmit_get_slave()
5433 slave = bond_xmit_activebackup_slave_get(bond); in bond_xmit_get_slave()
5438 slaves = rcu_dereference(bond->all_slaves); in bond_xmit_get_slave()
5440 slaves = rcu_dereference(bond->usable_slaves); in bond_xmit_get_slave()
5441 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); in bond_xmit_get_slave()
5446 slave = bond_xmit_alb_slave_get(bond, skb); in bond_xmit_get_slave()
5449 slave = bond_xmit_tlb_slave_get(bond, skb); in bond_xmit_get_slave()
5508 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond, in __bond_sk_get_lower_dev() argument
5516 slaves = rcu_dereference(bond->usable_slaves); in __bond_sk_get_lower_dev()
5530 struct bonding *bond = netdev_priv(dev); in bond_sk_get_lower_dev() local
5534 if (bond_sk_check(bond)) in bond_sk_get_lower_dev()
5535 lower = __bond_sk_get_lower_dev(bond, sk); in bond_sk_get_lower_dev()
5542 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb, in bond_tls_device_xmit() argument
5551 if (likely(bond_get_slave_by_dev(bond, tls_netdev))) in bond_tls_device_xmit()
5552 return bond_dev_queue_xmit(bond, skb, tls_netdev); in bond_tls_device_xmit()
5559 struct bonding *bond = netdev_priv(dev); in __bond_start_xmit() local
5561 if (bond_should_override_tx_queue(bond) && in __bond_start_xmit()
5562 !bond_slave_override(bond, skb)) in __bond_start_xmit()
5567 return bond_tls_device_xmit(bond, skb, dev); in __bond_start_xmit()
5570 switch (BOND_MODE(bond)) { in __bond_start_xmit()
5586 netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); in __bond_start_xmit()
5594 struct bonding *bond = netdev_priv(dev); in bond_start_xmit() local
5604 if (bond_has_slaves(bond)) in bond_start_xmit()
5616 struct bonding *bond = netdev_priv(bond_dev); in bond_xdp_get_xmit_slave() local
5621 switch (BOND_MODE(bond)) { in bond_xdp_get_xmit_slave()
5623 slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp); in bond_xdp_get_xmit_slave()
5627 slave = bond_xmit_activebackup_slave_get(bond); in bond_xdp_get_xmit_slave()
5632 slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp); in bond_xdp_get_xmit_slave()
5638 BOND_MODE(bond)); in bond_xdp_get_xmit_slave()
5688 struct bonding *bond = netdev_priv(dev); in bond_xdp_set() local
5702 if (!bond_xdp_check(bond, BOND_MODE(bond))) { in bond_xdp_set()
5708 old_prog = bond->xdp_prog; in bond_xdp_set()
5709 bond->xdp_prog = prog; in bond_xdp_set()
5711 bond_for_each_slave(bond, slave, iter) { in bond_xdp_set()
5750 bond->xdp_prog = old_prog; in bond_xdp_set()
5754 bond_for_each_slave(bond, rollback_slave, iter) { in bond_xdp_set()
5815 struct bonding *bond = netdev_priv(dev); in bond_hwtstamp_get() local
5819 real_dev = bond_option_active_slave_get_rcu(bond); in bond_hwtstamp_get()
5834 struct bonding *bond = netdev_priv(dev); in bond_hwtstamp_set() local
5841 real_dev = bond_option_active_slave_get_rcu(bond); in bond_hwtstamp_set()
5855 struct bonding *bond = netdev_priv(bond_dev); in bond_ethtool_get_link_ksettings() local
5868 bond_for_each_slave(bond, slave, iter) { in bond_ethtool_get_link_ksettings()
5872 if (BOND_MODE(bond) == BOND_MODE_BROADCAST) in bond_ethtool_get_link_ksettings()
5899 struct bonding *bond = netdev_priv(bond_dev); in bond_ethtool_get_ts_info() local
5908 real_dev = bond_option_active_slave_get_rcu(bond); in bond_ethtool_get_ts_info()
5917 bond_for_each_slave_rcu(bond, slave, iter) { in bond_ethtool_get_ts_info()
5981 .name = "bond",
5986 struct bonding *bond = netdev_priv(bond_dev); in bond_destructor() local
5988 if (bond->wq) in bond_destructor()
5989 destroy_workqueue(bond->wq); in bond_destructor()
5991 free_percpu(bond->rr_tx_counter); in bond_destructor()
5996 struct bonding *bond = netdev_priv(bond_dev); in bond_setup() local
5998 spin_lock_init(&bond->mode_lock); in bond_setup()
5999 bond->params = bonding_defaults; in bond_setup()
6002 bond->dev = bond_dev; in bond_setup()
6023 INIT_LIST_HEAD(&bond->ipsec_list); in bond_setup()
6024 mutex_init(&bond->ipsec_lock); in bond_setup()
6027 /* don't acquire bond device's netif_tx_lock when transmitting */ in bond_setup()
6030 /* Don't allow bond devices to change network namespaces. */ in bond_setup()
6033 /* By default, we declare the bond to be fully in bond_setup()
6053 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) in bond_setup()
6063 struct bonding *bond = netdev_priv(bond_dev); in bond_uninit() local
6070 bond_for_each_slave(bond, slave, iter) in bond_uninit()
6075 mutex_destroy(&bond->ipsec_lock); in bond_uninit()
6078 bond_set_slave_arr(bond, NULL, NULL); in bond_uninit()
6080 list_del_rcu(&bond->bond_list); in bond_uninit()
6082 bond_debug_unregister(bond); in bond_uninit()
6480 struct bonding *bond = netdev_priv(bond_dev); in bond_init() local
6485 bond->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, in bond_init()
6487 if (!bond->wq) in bond_init()
6490 bond->notifier_ctx = false; in bond_init()
6492 spin_lock_init(&bond->stats_lock); in bond_init()
6495 list_add_tail_rcu(&bond->bond_list, &bn->dev_list); in bond_init()
6497 bond_prepare_sysfs_group(bond); in bond_init()
6499 bond_debug_register(bond); in bond_init()
6514 /* Create a new bond based on the specified name and bonding parameters.
6515 * If name is NULL, obtain a suitable "bond%d" name for us.
6522 struct bonding *bond; in bond_create() local
6528 name ? name : "bond%d", NET_NAME_UNKNOWN, in bond_create()
6533 bond = netdev_priv(bond_dev); in bond_create()
6545 bond_work_init_all(bond); in bond_create()
6566 * race condition in bond unloading") we need to remove sysfs files
6582 /* Kill off any bonds created after unregistering bond rtnl ops */ in bond_net_exit_batch_rtnl()
6584 struct bonding *bond, *tmp_bond; in bond_net_exit_batch_rtnl() local
6587 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) in bond_net_exit_batch_rtnl()
6588 unregister_netdevice_queue(bond->dev, dev_kill_list); in bond_net_exit_batch_rtnl()
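The round-robin transmit path referenced above (bond_rr_gen_slave_id() around lines 4986-5002 and bond_xmit_roundrobin_slave_get() around lines 5011-5044) reduces to a packet counter, optionally divided by packets_per_slave, taken modulo the current slave count. Below is a minimal user-space C sketch of that selection arithmetic only; the names rr_pick_slave and the plain global counter are hypothetical stand-ins, whereas the kernel uses a per-CPU counter (bond->rr_tx_counter) and a reciprocal divide (reciprocal_packets_per_slave).

#include <stdio.h>

/* Hypothetical stand-in for bond->rr_tx_counter (per-CPU in the kernel). */
static unsigned int rr_tx_counter;

/* Pick the index of the slave that should transmit the next packet.
 * Mirrors the idea visible in bond_rr_gen_slave_id(): after every
 * packets_per_slave packets the id advances to the next slave, and the
 * caller wraps it with "% slave_cnt" as in bond_xmit_roundrobin_slave_get().
 */
static unsigned int rr_pick_slave(unsigned int slave_cnt,
                                  unsigned int packets_per_slave)
{
	unsigned int id = rr_tx_counter++;

	if (packets_per_slave > 1)
		id /= packets_per_slave;  /* kernel uses reciprocal_divide() here */

	return id % slave_cnt;
}

int main(void)
{
	/* Example: 3 slaves, 2 packets per slave -> 0 0 1 1 2 2 0 0 ... */
	for (int i = 0; i < 8; i++)
		printf("packet %d -> slave %u\n", i, rr_pick_slave(3, 2));
	return 0;
}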