// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Chelsio Communications, Inc. All rights reserved. */

#include "cxgb4.h"

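/* Drop a reference on the MPS TCAM entry matching @addr/@mask; a NULL
 * @mask matches against a full ff:ff:ff:ff:ff:ff bitmask.  Returns 0
 * once the last reference is dropped and the tracking entry is freed,
 * -EBUSY while other references remain, and -EINVAL if no matching
 * entry exists.
 */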
static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap,
				    const u8 *addr, const u8 *mask)
{
	u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mps_entries_ref *mps_entry, *tmp;
	int ret = -EINVAL;

	spin_lock_bh(&adap->mps_ref_lock);
	list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
		if (ether_addr_equal(mps_entry->addr, addr) &&
		    ether_addr_equal(mps_entry->mask, mask ? mask : bitmask)) {
			if (!refcount_dec_and_test(&mps_entry->refcnt)) {
				spin_unlock_bh(&adap->mps_ref_lock);
				return -EBUSY;
			}
			list_del(&mps_entry->list);
			kfree(mps_entry);
			ret = 0;
			break;
		}
	}
	spin_unlock_bh(&adap->mps_ref_lock);
	return ret;
}

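/* Take a reference on the MPS TCAM entry at index @idx, allocating a
 * new tracking entry (GFP_ATOMIC, as the list lock is held) when the
 * index is not tracked yet.  Returns 0 on success or -ENOMEM.
 */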
static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr,
			     u16 idx, const u8 *mask)
{
	u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mps_entries_ref *mps_entry;
	int ret = 0;

	spin_lock_bh(&adap->mps_ref_lock);
	list_for_each_entry(mps_entry, &adap->mps_ref, list) {
		if (mps_entry->idx == idx) {
			refcount_inc(&mps_entry->refcnt);
			goto unlock;
		}
	}
	mps_entry = kzalloc(sizeof(*mps_entry), GFP_ATOMIC);
	if (!mps_entry) {
		ret = -ENOMEM;
		goto unlock;
	}
	ether_addr_copy(mps_entry->mask, mask ? mask : bitmask);
	ether_addr_copy(mps_entry->addr, mac_addr);
	mps_entry->idx = idx;
	refcount_set(&mps_entry->refcnt, 1);
	list_add_tail(&mps_entry->list, &adap->mps_ref);
unlock:
	spin_unlock_bh(&adap->mps_ref_lock);
	return ret;
}

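/* Free MAC address filters for virtual interface @viid.  A filter is
 * only removed from hardware via t4_free_mac_filt() once its MPS
 * reference count drops to zero.
 */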
int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
			unsigned int naddr, const u8 **addr, bool sleep_ok)
{
	int ret, i;

	for (i = 0; i < naddr; i++) {
		if (!cxgb4_mps_ref_dec_by_mac(adap, addr[i], NULL)) {
			ret = t4_free_mac_filt(adap, adap->mbox, viid,
					       1, &addr[i], sleep_ok);
			if (ret < 0)
				return ret;
		}
	}

	/* return number of filters freed */
	return naddr;
}

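/* Allocate MAC address filters in hardware and take an MPS reference
 * for each address that was assigned a valid TCAM index (entries left
 * at 0xffff by t4_alloc_mac_filt() are skipped).  If reference
 * tracking fails, the freshly allocated filters are freed again.
 */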
int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
			 bool free, unsigned int naddr, const u8 **addr,
			 u16 *idx, u64 *hash, bool sleep_ok)
{
	int ret, i;

	ret = t4_alloc_mac_filt(adap, adap->mbox, viid, free,
				naddr, addr, idx, hash, sleep_ok);
	if (ret < 0)
		return ret;

	for (i = 0; i < naddr; i++) {
		if (idx[i] != 0xffff) {
			if (cxgb4_mps_ref_inc(adap, addr[i], idx[i], NULL)) {
				ret = -ENOMEM;
				goto error;
			}
		}
	}

	goto out;
error:
	cxgb4_free_mac_filt(adap, viid, naddr, addr, sleep_ok);

out:
	/* Returns a negative error number or the number of filters allocated */
	return ret;
}

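/* Change the MAC address behind @tcam_idx via cxgb4_change_mac() and,
 * on success, take an MPS reference on the (possibly updated) index.
 */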
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
			  int *tcam_idx, const u8 *addr,
			  bool persistent, u8 *smt_idx)
{
	int ret;

	ret = cxgb4_change_mac(pi, viid, tcam_idx,
			       addr, persistent, smt_idx);
	if (ret < 0)
		return ret;

	cxgb4_mps_ref_inc(pi->adapter, addr, *tcam_idx, NULL);
	return ret;
}

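/* Set up the lock and list head used to track MPS TCAM references. */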
int cxgb4_init_mps_ref_entries(struct adapter *adap)
{
	spin_lock_init(&adap->mps_ref_lock);
	INIT_LIST_HEAD(&adap->mps_ref);

	return 0;
}

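/* Free any remaining MPS reference tracking entries.  The unlocked
 * list_empty() check and the non-_bh locking suggest this is meant for
 * teardown, once no new references can be taken.
 */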
void cxgb4_free_mps_ref_entries(struct adapter *adap)
{
	struct mps_entries_ref *mps_entry, *tmp;

	if (list_empty(&adap->mps_ref))
		return;

	spin_lock(&adap->mps_ref_lock);
	list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
		list_del(&mps_entry->list);
		kfree(mps_entry);
	}
	spin_unlock(&adap->mps_ref_lock);
}