Lines Matching full:lag

/* LAG group config flags. */
/* LAG port state flags. */

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 */

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 */
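
Only the kernel-doc titles of these two structures match the query, so their fields are not shown. A hedged sketch of what the control-message payload plausibly carries, inferred from the batch_ver and pkt_number stores matched in nfp_fl_lag_config_group() further down (all other field names, widths, and ordering are assumptions):

/* Sketch only: just batch_ver and pkt_number are confirmed by the
 * matches below; everything else here is an assumed layout. */
struct nfp_flower_cmsg_lag_config {
        u8      ctrl_flags;     /* LAG group config flags (assumed) */
        u8      reserved[2];    /* assumed padding */
        u8      ttl;            /* assumed */
        __be32  pkt_number;     /* from nfp_fl_get_next_pkt_number() */
        __be32  batch_ver;      /* from lag->batch_ver, big-endian */
        __be32  group_id;       /* assumed */
        u8      member_cnt;     /* assumed */
        u8      reserved2[3];   /* assumed padding */
        __be32  members[];      /* assumed flexible array of port ids */
};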
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
        lag->pkt_num++;
        lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

        return lag->pkt_num;
}
static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
        /* Step by two so the version always stays even. */
        lag->batch_ver += 2;
        lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

        /* Zero is skipped after a wrap, so it is never a valid version. */
        if (!lag->batch_ver)
                lag->batch_ver += 2;
}
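
The version therefore moves in even steps and never lands on zero, which suggests odd values and zero are reserved for other purposes (an inference from the code, not stated in the matches). A self-contained userspace illustration of the resulting sequence, using a stand-in 3-bit mask in place of NFP_FL_LAG_VERSION_MASK:

#include <stdio.h>

#define VERSION_MASK 0x7        /* stand-in for NFP_FL_LAG_VERSION_MASK */

static unsigned int increment_version(unsigned int ver)
{
        ver += 2;
        ver &= VERSION_MASK;
        if (!ver)               /* skip the reserved zero value */
                ver += 2;
        return ver;
}

int main(void)
{
        unsigned int v = 0;

        /* Prints "2 4 6 2 4 6": even steps, zero skipped on wrap. */
        for (int i = 0; i < 6; i++)
                printf("%u ", v = increment_version(v));
        printf("\n");
        return 0;
}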
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
        ...
        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        id = ida_alloc_range(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
                             ...);
        ...
                ida_free(&lag->ida_handle, id);
        ...
        group->group_inst = ++lag->global_inst;
        list_add_tail(&group->list, &lag->group_list);
        ...
}
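
The matches show the usual IDA lifecycle: a group id is reserved up front and handed back on the failure path. A minimal sketch of that pattern (the upper bound, its name, the GFP flags, and the error handling are assumptions; only the lower bound's name appears in the matches):

#include <linux/gfp.h>
#include <linux/idr.h>

#define NFP_FL_LAG_GROUP_MIN    1       /* value is a stand-in */
#define NFP_FL_LAG_GROUP_MAX    31      /* name and value assumed */

/* Hypothetical helper illustrating the reserve/release pattern. */
static int example_reserve_group_id(struct ida *handle)
{
        int id;

        id = ida_alloc_range(handle, NFP_FL_LAG_GROUP_MIN,
                             NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
        if (id < 0)
                return id;      /* nothing was reserved */

        /* ... if any later setup step fails, hand the id back ... */
        ida_free(handle, id);
        return 0;
}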
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
                                          ...)
{
        ...
        list_for_each_entry(entry, &lag->group_list, list)
                ...
}
In nfp_flower_lag_populate_pre_action():
        NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
In nfp_flower_lag_get_info_from_netdev():
        ... struct nfp_tun_neigh_lag *lag)
        ... lag->lag_version, &lag->lag_instance);
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
                        ...)
{
        ...
        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
        ...
        nfp_fl_increment_version(lag);
        ...
        if (lag->rst_cfg) { ... }
        ...
        lag->rst_cfg = false;
        ...
        cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
        cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));
        ...
}
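
Each config message is stamped with the current batch version and a freshly drawn packet number, both converted to big-endian for the firmware; pairing the two presumably lets the firmware group messages into batches and order them within a batch, though that reading is an inference. A compact sketch of the stamping step, reusing the two helpers shown above and the assumed payload layout from the top of this listing:

/* Hedged sketch; relies on the assumed struct layout sketched earlier. */
static void example_stamp_cmsg(struct nfp_fl_lag *lag,
                               struct nfp_flower_cmsg_lag_config *cmsg_payload)
{
        cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
        cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));
}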
In nfp_fl_lag_do_work():
        struct nfp_fl_lag *lag;
        ...
        lag = container_of(delayed_work, struct nfp_fl_lag, work);
        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        mutex_lock(&lag->lock);
        list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
                ...
                err = nfp_fl_lag_config_group(lag, entry, NULL, 0, ...);
                ...
                schedule_delayed_work(&lag->work, ...);
                ...
                ida_free(&lag->ida_handle, entry->group_id);
                ...
                schedule_delayed_work(&lag->work, ...);
                ...
                err = nfp_fl_lag_config_group(lag, entry, acti_netdevs, ...);
                ...
                schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
                ...
        }
        ...
        err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
        ...
        mutex_unlock(&lag->lock);
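
A recurring shape in this worker: when a config message cannot be delivered, the work item re-queues itself instead of failing the update, so configuration converges once the control channel recovers. A generic, self-contained sketch of that retry idiom (all names and the delay are illustrative, not taken from the driver):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_ctx {
        struct delayed_work work;
};

static int example_try_send(struct example_ctx *ctx)
{
        return -EAGAIN;         /* pretend the send failed */
}

static void example_do_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct example_ctx *ctx = container_of(dwork, struct example_ctx, work);

        /* On a transient failure, requeue ourselves and retry shortly. */
        if (example_try_send(ctx))
                schedule_delayed_work(&ctx->work, msecs_to_jiffies(2));
}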
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
        ...
        if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
                ...
        __skb_queue_tail(&lag->retrans_skbs, skb);
        ...
}
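
Queueing of unprocessed messages is bounded: once retrans_skbs holds NFP_FL_LAG_RETRANS_LIMIT entries, the new skb is presumably rejected rather than queued (the rejection branch itself is not among the matches). A hedged, self-contained sketch of that bounded-queue idiom:

#include <linux/errno.h>
#include <linux/skbuff.h>

#define EXAMPLE_RETRANS_LIMIT 100       /* stand-in for NFP_FL_LAG_RETRANS_LIMIT */

/* Returns 0 if queued, -ENOSPC if the bound was hit (assumed behaviour). */
static int example_put_unprocessed(struct sk_buff_head *q, struct sk_buff *skb)
{
        if (skb_queue_len(q) >= EXAMPLE_RETRANS_LIMIT)
                return -ENOSPC;

        __skb_queue_tail(q, skb);
        return 0;
}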
static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
        ...
        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        while ((skb = __skb_dequeue(&lag->retrans_skbs)))
                ...
}
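
The drain side is the mirror image: pop every held skb and hand it to the send path. A minimal companion sketch to the bounded-queue example above (the send callback is a placeholder):

#include <linux/skbuff.h>

static void example_send_unprocessed(struct sk_buff_head *q,
                                     void (*send)(struct sk_buff *skb))
{
        struct sk_buff *skb;

        /* Dequeue until empty; __skb_dequeue() returns NULL when done. */
        while ((skb = __skb_dequeue(q)))
                send(skb);
}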
In nfp_flower_lag_unprocessed_msg():
        /* ... 4) Schedule a LAG config update ... */
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
                                 ...)
{
        ...
        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
                                 ...)
{
        ...
        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
        ...
        mutex_lock(&lag->lock);
        group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
        ...
                mutex_unlock(&lag->lock);
        ...
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
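
Deletion is deferred: this path only identifies the group under lag->lock (the first unlock site is presumably a not-found early return, judging by its position right after the lookup) and then kicks the worker to do the actual teardown. A hedged, self-contained sketch of that mark-then-defer shape:

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_group {
        struct net_device *master;
        bool to_remove;
};

struct example_lag {
        struct mutex lock;
        struct delayed_work work;
        struct example_group *group;    /* one group, for brevity */
};

/* Mark the group under the lock; the delayed worker performs the
 * teardown. The to_remove flag is an assumption about the mechanism. */
static void example_schedule_delete(struct example_lag *lag,
                                    struct net_device *master)
{
        mutex_lock(&lag->lock);
        if (!lag->group || lag->group->master != master) {
                mutex_unlock(&lag->lock);
                return;         /* nothing to delete */
        }
        lag->group->to_remove = true;
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, msecs_to_jiffies(2));
}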
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
                             ...)
{
        ...
        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
        ...
        mutex_lock(&lag->lock);
        group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);
        ...
        nfp_fl_lag_schedule_group_remove(lag, group);
        ...
        mutex_unlock(&lag->lock);
        ...
        group = nfp_fl_lag_group_create(lag, upper);
        ...
        mutex_unlock(&lag->lock);
        ...
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
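
The order of the matched calls suggests CHANGEUPPER is handled as a replace rather than an in-place edit: any existing group for the upper device is scheduled for removal and a fresh group is created, after which the worker pushes the new state to firmware. A hedged sketch of that replace-on-change shape (the example_* helpers are illustrative prototypes, not driver functions):

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_lag { struct mutex lock; struct delayed_work work; };
struct example_group;

/* Illustrative prototypes only. */
static struct example_group *example_find_group(struct example_lag *lag,
                                                struct net_device *upper);
static void example_schedule_remove(struct example_lag *lag,
                                    struct example_group *group);
static struct example_group *example_create_group(struct example_lag *lag,
                                                  struct net_device *upper);

static int example_changeupper(struct example_lag *lag, struct net_device *upper)
{
        struct example_group *group;

        mutex_lock(&lag->lock);
        group = example_find_group(lag, upper);
        if (group)
                example_schedule_remove(lag, group);    /* retire old state */

        group = example_create_group(lag, upper);       /* start fresh */
        if (IS_ERR(group)) {
                mutex_unlock(&lag->lock);
                return PTR_ERR(group);
        }
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, msecs_to_jiffies(2));
        return 0;
}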
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
                          ...)
{
        ...
        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
        ...
        mutex_lock(&lag->lock);
        ...
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
In nfp_flower_lag_netdev_event():
        struct nfp_fl_lag *lag = &priv->nfp_lag;
        ...
        err = nfp_fl_lag_changeupper_event(lag, ptr);
        ...
        nfp_fl_lag_changels_event(lag, netdev, ptr);
        ...
        nfp_fl_lag_schedule_group_delete(lag, netdev);
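
The three calls line up with the standard netdev notifier events a bonding-offload driver would care about; the event codes below are the obvious candidates for each call, inferred rather than visible in the matches:

/* Hedged reconstruction of the dispatch around the matched calls. */
switch (event) {
case NETDEV_CHANGEUPPER:        /* port joined/left a LAG master */
        err = nfp_fl_lag_changeupper_event(lag, ptr);
        break;
case NETDEV_CHANGELOWERSTATE:   /* member link/tx state changed */
        nfp_fl_lag_changels_event(lag, netdev, ptr);
        break;
case NETDEV_UNREGISTER:         /* master going away: drop its group */
        nfp_fl_lag_schedule_group_delete(lag, netdev);
        break;
}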
int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
        ...
        lag->rst_cfg = true;
        return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}
void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
        INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
        INIT_LIST_HEAD(&lag->group_list);
        mutex_init(&lag->lock);
        ida_init(&lag->ida_handle);

        __skb_queue_head_init(&lag->retrans_skbs);

        /* Establish an initial, valid batch version (zero is skipped). */
        nfp_fl_increment_version(lag);
}
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
        ...
        cancel_delayed_work_sync(&lag->work);
        ...
        __skb_queue_purge(&lag->retrans_skbs);
        ...
        mutex_lock(&lag->lock);
        list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
                ...
        }
        mutex_unlock(&lag->lock);
        mutex_destroy(&lag->lock);
        ida_destroy(&lag->ida_handle);
}
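
Cleanup order matters here: the delayed work is cancelled synchronously first, so nothing can touch the retransmit queue, group list, mutex, or IDA while they are being torn down. The loop body over the remaining groups is elided by the match; a hedged sketch of what it presumably does:

/* Assumed loop body: unlink and free each group left on the list. */
list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
        list_del(&entry->list);
        kfree(entry);
}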