1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021 Mellanox Technologies. */
3
4 #include <linux/build_bug.h>
5 #include <linux/list.h>
6 #include <linux/notifier.h>
7 #include <net/netevent.h>
8 #include <net/switchdev.h>
9 #include "lib/devcom.h"
10 #include "bridge.h"
11 #include "eswitch.h"
12 #include "bridge_priv.h"
13 #define CREATE_TRACE_POINTS
14 #include "diag/bridge_tracepoint.h"
15
/* rhashtable layout for bridge FDB entries: keyed by the embedded
 * struct mlx5_esw_bridge_fdb_key of each struct mlx5_esw_bridge_fdb_entry,
 * with the hash node also embedded in the entry. The table shrinks
 * automatically as entries are removed.
 */
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
22
23 static void
mlx5_esw_bridge_fdb_offload_notify(struct net_device * dev,const unsigned char * addr,u16 vid,unsigned long val)24 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
25 unsigned long val)
26 {
27 struct switchdev_notifier_fdb_info send_info = {};
28
29 send_info.addr = addr;
30 send_info.vid = vid;
31 send_info.offloaded = true;
32 call_switchdev_notifiers(val, dev, &send_info.info, NULL);
33 }
34
35 static void
mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry * entry)36 mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
37 {
38 if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
39 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
40 entry->key.vid,
41 SWITCHDEV_FDB_DEL_TO_BRIDGE);
42 }
43
mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch * esw)44 static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
45 {
46 return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
47 MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
48 MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
49 offsetof(struct vlan_ethhdr, h_vlan_proto);
50 }
51
52 static struct mlx5_pkt_reformat *
mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch * esw)53 mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
54 {
55 struct mlx5_pkt_reformat_params reformat_params = {};
56
57 reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
58 reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
59 reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
60 reformat_params.size = sizeof(struct vlan_hdr);
61 return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
62 }
63
64 struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte,u32 level,struct mlx5_eswitch * esw)65 mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
66 {
67 struct mlx5_flow_table_attr ft_attr = {};
68 struct mlx5_core_dev *dev = esw->dev;
69 struct mlx5_flow_namespace *ns;
70 struct mlx5_flow_table *fdb;
71
72 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
73 if (!ns) {
74 esw_warn(dev, "Failed to get FDB namespace\n");
75 return ERR_PTR(-ENOENT);
76 }
77
78 ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
79 ft_attr.max_fte = max_fte;
80 ft_attr.level = level;
81 ft_attr.prio = FDB_BR_OFFLOAD;
82 fdb = mlx5_create_flow_table(ns, &ft_attr);
83 if (IS_ERR(fdb))
84 esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
85
86 return fdb;
87 }
88
/* Create an ingress flow group spanning flow indices [@from, @to] that
 * matches on source MAC, VLAN tag presence + VID for @vlan_proto, and the
 * source vport metadata carried in reg_c_0.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					     struct mlx5_eswitch *esw,
					     struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full source MAC match. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	/* Which tag-presence bit to match depends on the VLAN protocol. */
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	/* reg_c_0 holds the source vport metadata. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
			 vlan_proto, PTR_ERR(fg));

	return fg;
}
129
130 static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * ingress_ft)131 mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
132 struct mlx5_flow_table *ingress_ft)
133 {
134 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
135 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
136
137 return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
138 }
139
140 static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * ingress_ft)141 mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
142 struct mlx5_flow_table *ingress_ft)
143 {
144 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
145 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
146
147 return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
148 ingress_ft);
149 }
150
/* Create an ingress "VLAN filter" flow group spanning [@from, @to]: matches
 * source MAC, the tag-presence bit for @vlan_proto and the vport metadata in
 * reg_c_0, but — unlike the regular VLAN group — deliberately does NOT match
 * first_vid, so it catches tagged packets regardless of their VID.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
						    u16 vlan_proto, struct mlx5_eswitch *esw,
						    struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full source MAC match. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	/* No first_vid here: any VID of the given tag type matches. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
188
189 static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * ingress_ft)190 mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
191 struct mlx5_flow_table *ingress_ft)
192 {
193 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
194 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
195
196 return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
197 ingress_ft);
198 }
199
200 static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * ingress_ft)201 mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
202 struct mlx5_flow_table *ingress_ft)
203 {
204 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
205 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
206
207 return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
208 ingress_ft);
209 }
210
/* Create the ingress MAC flow group: matches source MAC and vport metadata
 * in reg_c_0 only (no VLAN criteria), placed at the MAC group index range
 * of the ingress table.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full source MAC match. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);

	/* reg_c_0 holds the source vport metadata. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
			 PTR_ERR(fg));

	kvfree(in);
	return fg;
}
246
/* Create an egress flow group spanning [@from, @to] that matches destination
 * MAC plus VLAN tag presence + VID for @vlan_proto. Egress groups match
 * outer headers only — no vport metadata.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					    struct mlx5_eswitch *esw,
					    struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full destination MAC match. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
282
283 static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * egress_ft)284 mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
285 {
286 unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
287 unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
288
289 return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
290 }
291
292 static struct mlx5_flow_group *
mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * egress_ft)293 mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
294 struct mlx5_flow_table *egress_ft)
295 {
296 unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
297 unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
298
299 return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
300 }
301
/* Create the egress MAC flow group: matches destination MAC only, placed at
 * the MAC group index range of the egress table.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full destination MAC match. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
332
/* Create the egress miss flow group: matches only the tunnel metadata in
 * reg_c_1 (masked by ESW_TUN_MASK), placed at the miss group index range of
 * the egress table. Used for the VLAN-pop miss flow.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* reg_c_1 carries the tunnel/bridge mark bits. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table miss flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
362
/* Create the shared ingress infrastructure for bridge offload: the ingress
 * flow table, the skip table, and the five ingress flow groups (VLAN, VLAN
 * filter, QinQ, QinQ filter, MAC). All objects are published on @br_offloads
 * only after every creation succeeds; on failure, already-created objects
 * are torn down in reverse order via the goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
	struct mlx5_flow_table *ingress_ft, *skip_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	int err;

	/* Ingress rules match the source vport via metadata in reg_c_0, so
	 * metadata-based vport matching must be enabled on the eswitch.
	 */
	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return -EOPNOTSUPP;

	ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
						  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
						  esw);
	if (IS_ERR(ingress_ft))
		return PTR_ERR(ingress_ft);

	skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
					       MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
					       esw);
	if (IS_ERR(skip_ft)) {
		err = PTR_ERR(skip_ft);
		goto err_skip_tbl;
	}

	vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_filter_fg)) {
		err = PTR_ERR(vlan_filter_fg);
		goto err_vlan_filter_fg;
	}

	qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_filter_fg)) {
		err = PTR_ERR(qinq_filter_fg);
		goto err_qinq_filter_fg;
	}

	mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* Everything succeeded — publish the objects. */
	br_offloads->ingress_ft = ingress_ft;
	br_offloads->skip_ft = skip_ft;
	br_offloads->ingress_vlan_fg = vlan_fg;
	br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
	br_offloads->ingress_qinq_fg = qinq_fg;
	br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
	br_offloads->ingress_mac_fg = mac_fg;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(qinq_filter_fg);
err_qinq_filter_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_filter_fg);
err_vlan_filter_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(skip_ft);
err_skip_tbl:
	mlx5_destroy_flow_table(ingress_ft);
	return err;
}
441
/* Tear down the shared ingress infrastructure in reverse creation order:
 * groups before their table, ingress table last. Each pointer is NULLed
 * after destruction so stale references cannot be reused.
 */
static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
	br_offloads->ingress_mac_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
	br_offloads->ingress_qinq_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
	br_offloads->ingress_qinq_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
	br_offloads->ingress_vlan_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
	br_offloads->ingress_vlan_fg = NULL;
	mlx5_destroy_flow_table(br_offloads->skip_ft);
	br_offloads->skip_ft = NULL;
	mlx5_destroy_flow_table(br_offloads->ingress_ft);
	br_offloads->ingress_ft = NULL;
}
460
461 static struct mlx5_flow_handle *
462 mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
463 struct mlx5_flow_table *skip_ft,
464 struct mlx5_pkt_reformat *pkt_reformat);
465
/* Create the per-bridge egress table with its VLAN, QinQ and MAC flow
 * groups. When the device supports VLAN-pop via REMOVE_HEADER reformat,
 * additionally set up the miss group, the reformat and the miss flow — but
 * only on a best-effort basis: a failure in any of these three is logged,
 * the partial miss objects are released, and initialization still succeeds
 * (the miss fields are published as NULL).
 *
 * Returns 0 on success or a negative errno from the mandatory objects.
 */
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
				  struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
	struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
	struct mlx5_flow_handle *miss_handle = NULL;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_table *egress_ft;
	int err;

	egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
						 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
						 esw);
	if (IS_ERR(egress_ft))
		return PTR_ERR(egress_ft);

	vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* Optional miss path: group -> reformat -> flow. Any failure unwinds
	 * the miss objects created so far and skips the miss flow entirely.
	 */
	if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
		miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
		if (IS_ERR(miss_fg)) {
			esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
				 PTR_ERR(miss_fg));
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
		if (IS_ERR(miss_pkt_reformat)) {
			esw_warn(esw->dev,
				 "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
				 PTR_ERR(miss_pkt_reformat));
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
								      br_offloads->skip_ft,
								      miss_pkt_reformat);
		if (IS_ERR(miss_handle)) {
			esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
				 PTR_ERR(miss_handle));
			miss_handle = NULL;
			mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}
	}
skip_miss_flow:

	bridge->egress_ft = egress_ft;
	bridge->egress_vlan_fg = vlan_fg;
	bridge->egress_qinq_fg = qinq_fg;
	bridge->egress_mac_fg = mac_fg;
	bridge->egress_miss_fg = miss_fg;
	bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
	bridge->egress_miss_handle = miss_handle;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(egress_ft);
	return err;
}
554
/* Tear down the per-bridge egress table in reverse creation order. The miss
 * objects are optional (may have been skipped at init), so each is guarded
 * by a NULL check; the mandatory groups and the table are destroyed
 * unconditionally.
 */
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
	if (bridge->egress_miss_handle)
		mlx5_del_flow_rules(bridge->egress_miss_handle);
	if (bridge->egress_miss_pkt_reformat)
		mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
					     bridge->egress_miss_pkt_reformat);
	if (bridge->egress_miss_fg)
		mlx5_destroy_flow_group(bridge->egress_miss_fg);
	mlx5_destroy_flow_group(bridge->egress_mac_fg);
	mlx5_destroy_flow_group(bridge->egress_qinq_fg);
	mlx5_destroy_flow_group(bridge->egress_vlan_fg);
	mlx5_destroy_flow_table(bridge->egress_ft);
}
570
/* Add an ingress FDB rule matching source MAC @addr and the metadata of
 * @vport_num on @esw, forwarding to the bridge's egress table and counting
 * hits in @counter.
 *
 * VLAN handling depends on @vlan:
 *  - vlan with pkt_reformat_push: add push-reformat + mod-header actions
 *    (tag untagged traffic and mark it);
 *  - vlan without push reformat: match on the tag-presence bit for the
 *    bridge's VLAN protocol and on vlan->vid;
 *  - NULL vlan: no VLAN criteria or actions.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
					     struct mlx5_esw_bridge_vlan *vlan,
					     struct mlx5_fc *counter,
					     struct mlx5_esw_bridge *bridge,
					     struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dests[2] = {};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact source MAC match. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport metadata of @esw (may be a peer eswitch). */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

	if (vlan && vlan->pkt_reformat_push) {
		/* Push a VLAN tag and mark the packet via the mod header. */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.pkt_reformat = vlan->pkt_reformat_push;
		flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
	} else if (vlan) {
		/* Already-tagged traffic: match the tag-presence bit for the
		 * bridge's VLAN protocol plus the exact VID.
		 */
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	/* Forward to the bridge egress table and count. */
	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = bridge->egress_ft;
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dests[1].counter = counter;

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
				     ARRAY_SIZE(dests));

	kvfree(rule_spec);
	return handle;
}
640
641 static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num,const unsigned char * addr,struct mlx5_esw_bridge_vlan * vlan,struct mlx5_fc * counter,struct mlx5_esw_bridge * bridge)642 mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
643 struct mlx5_esw_bridge_vlan *vlan,
644 struct mlx5_fc *counter,
645 struct mlx5_esw_bridge *bridge)
646 {
647 return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
648 bridge, bridge->br_offloads->esw);
649 }
650
651 static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num,u16 esw_owner_vhca_id,const unsigned char * addr,struct mlx5_esw_bridge_vlan * vlan,struct mlx5_fc * counter,struct mlx5_esw_bridge * bridge)652 mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
653 const unsigned char *addr,
654 struct mlx5_esw_bridge_vlan *vlan,
655 struct mlx5_fc *counter,
656 struct mlx5_esw_bridge *bridge)
657 {
658 struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
659 struct mlx5_eswitch *tmp, *peer_esw = NULL;
660 static struct mlx5_flow_handle *handle;
661
662 if (!mlx5_devcom_for_each_peer_begin(devcom))
663 return ERR_PTR(-ENODEV);
664
665 mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
666 if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
667 peer_esw = tmp;
668 break;
669 }
670 }
671
672 if (!peer_esw) {
673 handle = ERR_PTR(-ENODEV);
674 goto out;
675 }
676
677 handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
678 bridge, peer_esw);
679
680 out:
681 mlx5_devcom_for_each_peer_end(devcom);
682 return handle;
683 }
684
/* Add an ingress "VLAN filter" rule: matches source MAC @addr, the vport
 * metadata of @vport_num and the tag-presence bit for the bridge's VLAN
 * protocol — but no VID — and forwards to the skip table. This diverts
 * tagged traffic that does not hit an exact-VID rule away from the bridge
 * pipeline.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
					   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = br_offloads->skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact source MAC match. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the local eswitch's vport metadata in reg_c_0. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	/* Tag presence only — intentionally no first_vid match. */
	if (bridge->vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (bridge->vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
737
/* Add an egress FDB rule matching destination MAC @addr (plus VLAN tag and
 * VID when @vlan is set) and forwarding to vport @vport_num. When @vlan has
 * a pop reformat, the VLAN header is stripped on the way out. On merged
 * eswitch setups the destination vport is qualified with
 * @esw_owner_vhca_id.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
				   struct mlx5_esw_bridge_vlan *vlan,
				   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = vport_num,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	/* For uplink-bound rules on capable devices, restrict the rule to
	 * traffic originating from the local vport.
	 */
	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Exact destination MAC match. */
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (vlan) {
		/* Strip the VLAN header on egress when a pop reformat exists. */
		if (vlan->pkt_reformat_pop) {
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act.pkt_reformat = vlan->pkt_reformat_pop;
		}

		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	/* On merged eswitch, identify the destination by its owner VHCA. */
	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
804
/* Add the egress miss flow: matches packets whose reg_c_1 (masked by
 * ESW_TUN_MASK) carries the ingress push-VLAN mark, applies @pkt_reformat
 * (the REMOVE_HEADER VLAN pop) and forwards them to @skip_ft. This undoes
 * the VLAN that ingress pushed when the packet missed every egress rule.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
					struct mlx5_flow_table *skip_ft,
					struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
		.flags = FLOW_ACT_NO_APPEND,
		.pkt_reformat = pkt_reformat,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	/* Match only packets marked by the ingress push-VLAN rule. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);

	handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
839
mlx5_esw_bridge_create(struct net_device * br_netdev,struct mlx5_esw_bridge_offloads * br_offloads)840 static struct mlx5_esw_bridge *mlx5_esw_bridge_create(struct net_device *br_netdev,
841 struct mlx5_esw_bridge_offloads *br_offloads)
842 {
843 struct mlx5_esw_bridge *bridge;
844 int err;
845
846 bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
847 if (!bridge)
848 return ERR_PTR(-ENOMEM);
849
850 bridge->br_offloads = br_offloads;
851 err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
852 if (err)
853 goto err_egress_tbl;
854
855 err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
856 if (err)
857 goto err_fdb_ht;
858
859 err = mlx5_esw_bridge_mdb_init(bridge);
860 if (err)
861 goto err_mdb_ht;
862
863 INIT_LIST_HEAD(&bridge->fdb_list);
864 bridge->ifindex = br_netdev->ifindex;
865 bridge->refcnt = 1;
866 bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
867 bridge->vlan_proto = ETH_P_8021Q;
868 list_add(&bridge->list, &br_offloads->bridges);
869 mlx5_esw_bridge_debugfs_init(br_netdev, bridge);
870
871 return bridge;
872
873 err_mdb_ht:
874 rhashtable_destroy(&bridge->fdb_ht);
875 err_fdb_ht:
876 mlx5_esw_bridge_egress_table_cleanup(bridge);
877 err_egress_tbl:
878 kvfree(bridge);
879 return ERR_PTR(err);
880 }
881
mlx5_esw_bridge_get(struct mlx5_esw_bridge * bridge)882 static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
883 {
884 bridge->refcnt++;
885 }
886
mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads * br_offloads,struct mlx5_esw_bridge * bridge)887 static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
888 struct mlx5_esw_bridge *bridge)
889 {
890 if (--bridge->refcnt)
891 return;
892
893 mlx5_esw_bridge_debugfs_cleanup(bridge);
894 mlx5_esw_bridge_egress_table_cleanup(bridge);
895 mlx5_esw_bridge_mcast_disable(bridge);
896 list_del(&bridge->list);
897 mlx5_esw_bridge_mdb_cleanup(bridge);
898 rhashtable_destroy(&bridge->fdb_ht);
899 kvfree(bridge);
900
901 if (list_empty(&br_offloads->bridges))
902 mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
903 }
904
/* Find the offload state for @br_netdev by ifindex, taking a reference, or
 * create it if it does not exist yet. The shared ingress table is lazily
 * initialized before the first bridge is created and torn back down if that
 * creation fails. Must be called under RTNL. Returns ERR_PTR() on failure.
 */
static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(struct net_device *br_netdev, struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	ASSERT_RTNL();

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		if (bridge->ifindex == br_netdev->ifindex) {
			mlx5_esw_bridge_get(bridge);
			return bridge;
		}
	}

	/* First bridge: create the ingress table shared by all bridges. */
	if (!br_offloads->ingress_ft) {
		int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

		if (err)
			return ERR_PTR(err);
	}

	bridge = mlx5_esw_bridge_create(br_netdev, br_offloads);
	/* Don't leave the ingress table around if no bridge ended up using it. */
	if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
	return bridge;
}
931
mlx5_esw_bridge_port_key_from_data(u16 vport_num,u16 esw_owner_vhca_id)932 static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
933 {
934 return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
935 }
936
mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port * port)937 unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
938 {
939 return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
940 }
941
mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge_offloads * br_offloads)942 static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
943 struct mlx5_esw_bridge_offloads *br_offloads)
944 {
945 return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
946 }
947
948 static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_port_lookup(u16 vport_num,u16 esw_owner_vhca_id,struct mlx5_esw_bridge_offloads * br_offloads)949 mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
950 struct mlx5_esw_bridge_offloads *br_offloads)
951 {
952 return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
953 esw_owner_vhca_id));
954 }
955
mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge_offloads * br_offloads)956 static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
957 struct mlx5_esw_bridge_offloads *br_offloads)
958 {
959 xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
960 }
961
962 static struct mlx5_esw_bridge *
mlx5_esw_bridge_from_port_lookup(u16 vport_num,u16 esw_owner_vhca_id,struct mlx5_esw_bridge_offloads * br_offloads)963 mlx5_esw_bridge_from_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
964 struct mlx5_esw_bridge_offloads *br_offloads)
965 {
966 struct mlx5_esw_bridge_port *port;
967
968 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
969 if (!port)
970 return NULL;
971
972 return port->bridge;
973 }
974
mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry * entry)975 static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
976 {
977 trace_mlx5_esw_bridge_fdb_entry_refresh(entry);
978
979 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
980 entry->key.vid,
981 SWITCHDEV_FDB_ADD_TO_BRIDGE);
982 }
983
/* Release all resources of one FDB entry: unlink it from the hashtable,
 * delete its flow rules (egress, optional VLAN filter, ingress), destroy the
 * ingress flow counter, unlink it from the VLAN and bridge lists and free it.
 * Does NOT notify the bridge driver — use
 * mlx5_esw_bridge_fdb_entry_notify_and_cleanup() for that.
 */
static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
				  struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);

	rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	mlx5_del_flow_rules(entry->egress_handle);
	/* filter_handle only exists when VLAN filtering was enabled at entry
	 * creation time.
	 */
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
	mlx5_del_flow_rules(entry->ingress_handle);
	mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
	list_del(&entry->vlan_list);
	list_del(&entry->list);
	kvfree(entry);
}
1000
/* Notify the bridge driver that the entry is gone (unless it was added by the
 * user or is a peer entry — see mlx5_esw_bridge_fdb_del_notify()), then free
 * all of its resources.
 */
static void
mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
					     struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_fdb_del_notify(entry);
	mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
1008
mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge * bridge)1009 static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
1010 {
1011 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1012
1013 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
1014 mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
1015 }
1016
1017 static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_lookup(u16 vid,struct mlx5_esw_bridge_port * port)1018 mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
1019 {
1020 return xa_load(&port->vlans, vid);
1021 }
1022
1023 static int
mlx5_esw_bridge_vlan_push_create(u16 vlan_proto,struct mlx5_esw_bridge_vlan * vlan,struct mlx5_eswitch * esw)1024 mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
1025 struct mlx5_eswitch *esw)
1026 {
1027 struct {
1028 __be16 h_vlan_proto;
1029 __be16 h_vlan_TCI;
1030 } vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
1031 struct mlx5_pkt_reformat_params reformat_params = {};
1032 struct mlx5_pkt_reformat *pkt_reformat;
1033
1034 if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
1035 MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
1036 MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
1037 offsetof(struct vlan_ethhdr, h_vlan_proto)) {
1038 esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
1039 return -EOPNOTSUPP;
1040 }
1041
1042 reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
1043 reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
1044 reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
1045 reformat_params.size = sizeof(vlan_hdr);
1046 reformat_params.data = &vlan_hdr;
1047 pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
1048 &reformat_params,
1049 MLX5_FLOW_NAMESPACE_FDB);
1050 if (IS_ERR(pkt_reformat)) {
1051 esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
1052 PTR_ERR(pkt_reformat));
1053 return PTR_ERR(pkt_reformat);
1054 }
1055
1056 vlan->pkt_reformat_push = pkt_reformat;
1057 return 0;
1058 }
1059
1060 static void
mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan * vlan,struct mlx5_eswitch * esw)1061 mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1062 {
1063 mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
1064 vlan->pkt_reformat_push = NULL;
1065 }
1066
/* Allocate the REMOVE_HEADER packet reformat used to strip the VLAN header
 * on egress for untagged VLANs. Stores it in vlan->pkt_reformat_pop.
 * Returns 0 or a negative errno (-EOPNOTSUPP when unsupported by the device).
 */
static int
mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	struct mlx5_pkt_reformat *pkt_reformat;

	if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
		esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
		return -EOPNOTSUPP;
	}

	pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
	if (IS_ERR(pkt_reformat)) {
		esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
			 PTR_ERR(pkt_reformat));
		return PTR_ERR(pkt_reformat);
	}

	vlan->pkt_reformat_pop = pkt_reformat;
	return 0;
}
1087
1088 static void
mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan * vlan,struct mlx5_eswitch * esw)1089 mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1090 {
1091 mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
1092 vlan->pkt_reformat_pop = NULL;
1093 }
1094
/* Allocate the modify-header action that writes the
 * ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN value into metadata register C1 (at bit
 * offset 8, spanning the tunnel opts+id bits). The mark lets the egress miss
 * rule recognize packets that had a VLAN pushed on ingress. Stores the action
 * in vlan->pkt_mod_hdr_push_mark. Returns 0 or a negative errno.
 */
static int
mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *pkt_mod_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(set_action_in, action, offset, 8);
	MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
	MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);

	pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
	if (IS_ERR(pkt_mod_hdr))
		return PTR_ERR(pkt_mod_hdr);

	vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
	return 0;
}
1114
1115 static void
mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan * vlan,struct mlx5_eswitch * esw)1116 mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1117 {
1118 mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
1119 vlan->pkt_mod_hdr_push_mark = NULL;
1120 }
1121
1122 static int
mlx5_esw_bridge_vlan_push_pop_fhs_create(u16 vlan_proto,struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge_vlan * vlan)1123 mlx5_esw_bridge_vlan_push_pop_fhs_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
1124 struct mlx5_esw_bridge_vlan *vlan)
1125 {
1126 return mlx5_esw_bridge_vlan_mcast_init(vlan_proto, port, vlan);
1127 }
1128
/* Counterpart of mlx5_esw_bridge_vlan_push_pop_fhs_create(). */
static void
mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
}
1134
/* Create the hardware actions a VLAN needs according to its bridge flags:
 * PVID implies a VLAN push plus the reg_c_1 push mark; UNTAGGED implies a
 * VLAN pop plus the multicast handles. On failure, unwinds only the pieces
 * that were actually created (the pointer checks handle flag combinations
 * where a step was skipped). Returns 0 or a negative errno.
 */
static int
mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_port *port,
				     struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	int err;

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
		if (err)
			return err;

		err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
		if (err)
			goto err_vlan_push_mark;
	}

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
		if (err)
			goto err_vlan_pop;

		err = mlx5_esw_bridge_vlan_push_pop_fhs_create(vlan_proto, port, vlan);
		if (err)
			goto err_vlan_pop_fhs;
	}

	return 0;

err_vlan_pop_fhs:
	mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
	/* push/push-mark only exist when PVID was set; check before undoing. */
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
err_vlan_push_mark:
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
	return err;
}
1173
/* Allocate per-port VLAN state, create its push/pop hardware actions per
 * @flags, and register it in the port's VLAN xarray under @vid.
 * Returns the new VLAN or ERR_PTR(); on failure, every action that was
 * created is released (pointer checks cover flag-dependent steps).
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
			    struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_vlan *vlan;
	int err;

	vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	INIT_LIST_HEAD(&vlan->fdb_list);

	err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, port, vlan, esw);
	if (err)
		goto err_vlan_push_pop;

	err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
	if (err)
		goto err_xa_insert;

	trace_mlx5_esw_bridge_vlan_create(vlan);
	return vlan;

err_xa_insert:
	if (vlan->mcast_handle)
		mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push_pop:
	kvfree(vlan);
	return ERR_PTR(err);
}
1213
mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge_vlan * vlan)1214 static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
1215 struct mlx5_esw_bridge_vlan *vlan)
1216 {
1217 xa_erase(&port->vlans, vlan->vid);
1218 }
1219
mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge_vlan * vlan,struct mlx5_esw_bridge * bridge)1220 static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_port *port,
1221 struct mlx5_esw_bridge_vlan *vlan,
1222 struct mlx5_esw_bridge *bridge)
1223 {
1224 struct mlx5_eswitch *esw = bridge->br_offloads->esw;
1225 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1226
1227 list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
1228 mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
1229 mlx5_esw_bridge_port_mdb_vlan_flush(port, vlan);
1230
1231 if (vlan->mcast_handle)
1232 mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
1233 if (vlan->pkt_reformat_pop)
1234 mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
1235 if (vlan->pkt_mod_hdr_push_mark)
1236 mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
1237 if (vlan->pkt_reformat_push)
1238 mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
1239 }
1240
mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge_vlan * vlan,struct mlx5_esw_bridge * bridge)1241 static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
1242 struct mlx5_esw_bridge_vlan *vlan,
1243 struct mlx5_esw_bridge *bridge)
1244 {
1245 trace_mlx5_esw_bridge_vlan_cleanup(vlan);
1246 mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
1247 mlx5_esw_bridge_vlan_erase(port, vlan);
1248 kvfree(vlan);
1249 }
1250
mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge * bridge)1251 static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
1252 struct mlx5_esw_bridge *bridge)
1253 {
1254 struct mlx5_esw_bridge_vlan *vlan;
1255 unsigned long index;
1256
1257 xa_for_each(&port->vlans, index, vlan)
1258 mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
1259 }
1260
mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge * bridge)1261 static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
1262 struct mlx5_esw_bridge *bridge)
1263 {
1264 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
1265 struct mlx5_esw_bridge_vlan *vlan;
1266 unsigned long i;
1267 int err;
1268
1269 xa_for_each(&port->vlans, i, vlan) {
1270 mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
1271 err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, port,
1272 vlan, br_offloads->esw);
1273 if (err) {
1274 esw_warn(br_offloads->esw->dev,
1275 "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
1276 vlan->vid, bridge->vlan_proto, port->vport_num,
1277 err);
1278 return err;
1279 }
1280 }
1281
1282 return 0;
1283 }
1284
1285 static int
mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge * bridge)1286 mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
1287 {
1288 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
1289 struct mlx5_esw_bridge_port *port;
1290 unsigned long i;
1291 int err;
1292
1293 xa_for_each(&br_offloads->ports, i, port) {
1294 if (port->bridge != bridge)
1295 continue;
1296
1297 err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
1298 if (err)
1299 return err;
1300 }
1301
1302 return 0;
1303 }
1304
/* Resolve the VLAN state for (@vid, @vport_num, @esw_owner_vhca_id) on
 * @bridge. FDB offload work runs asynchronously on a workqueue, so the port
 * or VLAN may have been removed concurrently — those races are reported at
 * 'info' level and returned as ERR_PTR(-EINVAL) so the caller skips the
 * offload.
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
	if (!port) {
		/* FDB is added asynchronously on wq while port might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan) {
		/* FDB is added asynchronously on wq while vlan might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
			 vport_num);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}
1333
1334 static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge * bridge,const unsigned char * addr,u16 vid)1335 mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
1336 const unsigned char *addr, u16 vid)
1337 {
1338 struct mlx5_esw_bridge_fdb_key key = {};
1339
1340 ether_addr_copy(key.addr, addr);
1341 key.vid = vid;
1342 return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
1343 }
1344
/* Create and offload one FDB entry for (@addr, @vid) on the given port:
 * ingress flow (or peer-ingress flow for entries learned on the peer
 * eswitch), optional ingress VLAN-filter flow, egress flow, a flow counter
 * used for ageing, and the rhashtable/list bookkeeping. Any pre-existing
 * entry with the same key is removed (with notification) first.
 * Returns the new entry or ERR_PTR(); on failure, everything created so far
 * is unwound in reverse order.
 */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
			       const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
			       struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan = NULL;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_flow_handle *handle;
	struct mlx5_fc *counter;
	int err;

	/* With VLAN filtering enabled, a tagged entry must match an existing
	 * port VLAN so its push/pop actions can be applied.
	 */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
							esw);
		if (IS_ERR(vlan))
			return ERR_CAST(vlan);
	}

	/* Replace a stale entry with the same (MAC, VLAN) key, if any. */
	entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
	if (entry)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	entry->dev = dev;
	entry->vport_num = vport_num;
	entry->esw_owner_vhca_id = esw_owner_vhca_id;
	/* lastuse seeds the ageing logic; compared against counter activity. */
	entry->lastuse = jiffies;
	if (added_by_user)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
	if (peer)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;

	counter = mlx5_fc_create(esw->dev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_ingress_fc_create;
	}
	entry->ingress_counter = counter;

	handle = peer ?
		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
							 addr, vlan, counter, bridge) :
		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
						    counter, bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
			 vport_num, err, peer);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;

	/* The filter flow only exists under VLAN filtering. */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
		handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
				 vport_num, err);
			goto err_ingress_filter_flow_create;
		}
		entry->filter_handle = handle;
	}

	handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
						    bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_egress_flow_create;
	}
	entry->egress_handle = handle;

	err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	if (err) {
		esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
		goto err_ht_init;
	}

	/* Untagged entries still need a valid (empty) vlan_list node because
	 * cleanup unconditionally list_del()s it.
	 */
	if (vlan)
		list_add(&entry->vlan_list, &vlan->fdb_list);
	else
		INIT_LIST_HEAD(&entry->vlan_list);
	list_add(&entry->list, &bridge->fdb_list);

	trace_mlx5_esw_bridge_fdb_entry_init(entry);
	return entry;

err_ht_init:
	mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
err_ingress_filter_flow_create:
	mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
	mlx5_fc_destroy(esw->dev, entry->ingress_counter);
err_ingress_fc_create:
	kvfree(entry);
	return ERR_PTR(err);
}
1451
mlx5_esw_bridge_ageing_time_set(u16 vport_num,u16 esw_owner_vhca_id,unsigned long ageing_time,struct mlx5_esw_bridge_offloads * br_offloads)1452 int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
1453 struct mlx5_esw_bridge_offloads *br_offloads)
1454 {
1455 struct mlx5_esw_bridge *bridge;
1456
1457 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1458 if (!bridge)
1459 return -EINVAL;
1460
1461 bridge->ageing_time = clock_t_to_jiffies(ageing_time);
1462 return 0;
1463 }
1464
mlx5_esw_bridge_vlan_filtering_set(u16 vport_num,u16 esw_owner_vhca_id,bool enable,struct mlx5_esw_bridge_offloads * br_offloads)1465 int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
1466 struct mlx5_esw_bridge_offloads *br_offloads)
1467 {
1468 struct mlx5_esw_bridge *bridge;
1469 bool filtering;
1470
1471 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1472 if (!bridge)
1473 return -EINVAL;
1474
1475 filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1476 if (filtering == enable)
1477 return 0;
1478
1479 mlx5_esw_bridge_fdb_flush(bridge);
1480 mlx5_esw_bridge_mdb_flush(bridge);
1481 if (enable)
1482 bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1483 else
1484 bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1485
1486 return 0;
1487 }
1488
mlx5_esw_bridge_vlan_proto_set(u16 vport_num,u16 esw_owner_vhca_id,u16 proto,struct mlx5_esw_bridge_offloads * br_offloads)1489 int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
1490 struct mlx5_esw_bridge_offloads *br_offloads)
1491 {
1492 struct mlx5_esw_bridge *bridge;
1493
1494 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id,
1495 br_offloads);
1496 if (!bridge)
1497 return -EINVAL;
1498
1499 if (bridge->vlan_proto == proto)
1500 return 0;
1501 if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
1502 esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
1503 return -EOPNOTSUPP;
1504 }
1505
1506 mlx5_esw_bridge_fdb_flush(bridge);
1507 mlx5_esw_bridge_mdb_flush(bridge);
1508 bridge->vlan_proto = proto;
1509 mlx5_esw_bridge_vlans_recreate(bridge);
1510
1511 return 0;
1512 }
1513
mlx5_esw_bridge_mcast_set(u16 vport_num,u16 esw_owner_vhca_id,bool enable,struct mlx5_esw_bridge_offloads * br_offloads)1514 int mlx5_esw_bridge_mcast_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
1515 struct mlx5_esw_bridge_offloads *br_offloads)
1516 {
1517 struct mlx5_eswitch *esw = br_offloads->esw;
1518 struct mlx5_esw_bridge *bridge;
1519 int err = 0;
1520 bool mcast;
1521
1522 if (!(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table) ||
1523 MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table_limit_regc)) ||
1524 !MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_uplink_hairpin) ||
1525 !MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
1526 return -EOPNOTSUPP;
1527
1528 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1529 if (!bridge)
1530 return -EINVAL;
1531
1532 mcast = bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG;
1533 if (mcast == enable)
1534 return 0;
1535
1536 if (enable)
1537 err = mlx5_esw_bridge_mcast_enable(bridge);
1538 else
1539 mlx5_esw_bridge_mcast_disable(bridge);
1540
1541 return err;
1542 }
1543
mlx5_esw_bridge_vport_init(u16 vport_num,u16 esw_owner_vhca_id,u16 flags,struct mlx5_esw_bridge_offloads * br_offloads,struct mlx5_esw_bridge * bridge)1544 static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
1545 struct mlx5_esw_bridge_offloads *br_offloads,
1546 struct mlx5_esw_bridge *bridge)
1547 {
1548 struct mlx5_eswitch *esw = br_offloads->esw;
1549 struct mlx5_esw_bridge_port *port;
1550 int err;
1551
1552 port = kvzalloc(sizeof(*port), GFP_KERNEL);
1553 if (!port)
1554 return -ENOMEM;
1555
1556 port->vport_num = vport_num;
1557 port->esw_owner_vhca_id = esw_owner_vhca_id;
1558 port->bridge = bridge;
1559 port->flags |= flags;
1560 xa_init(&port->vlans);
1561
1562 err = mlx5_esw_bridge_port_mcast_init(port);
1563 if (err) {
1564 esw_warn(esw->dev,
1565 "Failed to initialize port multicast (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
1566 port->vport_num, port->esw_owner_vhca_id, err);
1567 goto err_port_mcast;
1568 }
1569
1570 err = mlx5_esw_bridge_port_insert(port, br_offloads);
1571 if (err) {
1572 esw_warn(esw->dev,
1573 "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
1574 port->vport_num, port->esw_owner_vhca_id, err);
1575 goto err_port_insert;
1576 }
1577 trace_mlx5_esw_bridge_vport_init(port);
1578
1579 return 0;
1580
1581 err_port_insert:
1582 mlx5_esw_bridge_port_mcast_cleanup(port);
1583 err_port_mcast:
1584 kvfree(port);
1585 return err;
1586 }
1587
mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads * br_offloads,struct mlx5_esw_bridge_port * port)1588 static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
1589 struct mlx5_esw_bridge_port *port)
1590 {
1591 u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
1592 struct mlx5_esw_bridge *bridge = port->bridge;
1593 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1594
1595 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
1596 if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
1597 mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
1598
1599 trace_mlx5_esw_bridge_vport_cleanup(port);
1600 mlx5_esw_bridge_port_vlans_flush(port, bridge);
1601 mlx5_esw_bridge_port_mcast_cleanup(port);
1602 mlx5_esw_bridge_port_erase(port, br_offloads);
1603 kvfree(port);
1604 mlx5_esw_bridge_put(br_offloads, bridge);
1605 return 0;
1606 }
1607
mlx5_esw_bridge_vport_link_with_flags(struct net_device * br_netdev,u16 vport_num,u16 esw_owner_vhca_id,u16 flags,struct mlx5_esw_bridge_offloads * br_offloads,struct netlink_ext_ack * extack)1608 static int mlx5_esw_bridge_vport_link_with_flags(struct net_device *br_netdev, u16 vport_num,
1609 u16 esw_owner_vhca_id, u16 flags,
1610 struct mlx5_esw_bridge_offloads *br_offloads,
1611 struct netlink_ext_ack *extack)
1612 {
1613 struct mlx5_esw_bridge *bridge;
1614 int err;
1615
1616 bridge = mlx5_esw_bridge_lookup(br_netdev, br_offloads);
1617 if (IS_ERR(bridge)) {
1618 NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
1619 return PTR_ERR(bridge);
1620 }
1621
1622 err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
1623 if (err) {
1624 NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
1625 goto err_vport;
1626 }
1627 return 0;
1628
1629 err_vport:
1630 mlx5_esw_bridge_put(br_offloads, bridge);
1631 return err;
1632 }
1633
mlx5_esw_bridge_vport_link(struct net_device * br_netdev,u16 vport_num,u16 esw_owner_vhca_id,struct mlx5_esw_bridge_offloads * br_offloads,struct netlink_ext_ack * extack)1634 int mlx5_esw_bridge_vport_link(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
1635 struct mlx5_esw_bridge_offloads *br_offloads,
1636 struct netlink_ext_ack *extack)
1637 {
1638 return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id, 0,
1639 br_offloads, extack);
1640 }
1641
mlx5_esw_bridge_vport_unlink(struct net_device * br_netdev,u16 vport_num,u16 esw_owner_vhca_id,struct mlx5_esw_bridge_offloads * br_offloads,struct netlink_ext_ack * extack)1642 int mlx5_esw_bridge_vport_unlink(struct net_device *br_netdev, u16 vport_num,
1643 u16 esw_owner_vhca_id,
1644 struct mlx5_esw_bridge_offloads *br_offloads,
1645 struct netlink_ext_ack *extack)
1646 {
1647 struct mlx5_esw_bridge_port *port;
1648 int err;
1649
1650 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1651 if (!port) {
1652 NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
1653 return -EINVAL;
1654 }
1655 if (port->bridge->ifindex != br_netdev->ifindex) {
1656 NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
1657 return -EINVAL;
1658 }
1659
1660 err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1661 if (err)
1662 NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
1663 return err;
1664 }
1665
/* Link a peer eswitch's vport to a bridge. Only meaningful when the device
 * supports merged eswitch; otherwise the request is silently ignored
 * (returns 0) since cross-eswitch forwarding cannot be offloaded.
 */
int mlx5_esw_bridge_vport_peer_link(struct net_device *br_netdev, u16 vport_num,
				    u16 esw_owner_vhca_id,
				    struct mlx5_esw_bridge_offloads *br_offloads,
				    struct netlink_ext_ack *extack)
{
	if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
		return 0;

	/* Mark the port as a peer so FDB entries learned on it are handled
	 * accordingly.
	 */
	return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id,
						     MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
						     br_offloads, extack);
}
1678
/* Unlink a peer eswitch's vport from a bridge. Peer ports require no
 * special teardown beyond the regular unlink path.
 */
int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_num,
				      u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_unlink(br_netdev, vport_num, esw_owner_vhca_id, br_offloads,
					    extack);
}
1687
/* Add (or re-add with new flags) a VLAN entry on a bridge port. If the VLAN
 * already exists with identical flags this is a no-op; if the flags differ,
 * the old entry is torn down and recreated with the new flags.
 *
 * Returns 0 on success or a negative errno, with extack populated on
 * creation failure.
 */
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
				  struct mlx5_esw_bridge_offloads *br_offloads,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_vlan *vlan_entry;
	struct mlx5_esw_bridge_port *bridge_port;

	bridge_port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge_port)
		return -EINVAL;

	vlan_entry = mlx5_esw_bridge_vlan_lookup(vid, bridge_port);
	if (vlan_entry && vlan_entry->flags == flags)
		return 0; /* already configured as requested */
	if (vlan_entry)
		/* Flags changed; drop the stale entry before recreating it. */
		mlx5_esw_bridge_vlan_cleanup(bridge_port, vlan_entry, bridge_port->bridge);

	vlan_entry = mlx5_esw_bridge_vlan_create(bridge_port->bridge->vlan_proto, vid, flags,
						 bridge_port, br_offloads->esw);
	if (IS_ERR(vlan_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
		return PTR_ERR(vlan_entry);
	}
	return 0;
}
1714
/* Remove a VLAN entry from a bridge port. Silently does nothing if either
 * the port or the VLAN entry does not exist.
 */
void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
				   struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_vlan *vlan_entry;
	struct mlx5_esw_bridge_port *bridge_port;

	bridge_port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge_port)
		return;

	vlan_entry = mlx5_esw_bridge_vlan_lookup(vid, bridge_port);
	if (vlan_entry)
		mlx5_esw_bridge_vlan_cleanup(bridge_port, vlan_entry, bridge_port->bridge);
}
1730
/* Refresh the last-used timestamp of an offloaded FDB entry in response to a
 * switchdev notification, so the aging logic in mlx5_esw_bridge_update()
 * does not expire an entry the kernel bridge still considers active.
 */
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				     struct mlx5_esw_bridge_offloads *br_offloads,
				     struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *fdb_entry;
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return;

	fdb_entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (fdb_entry) {
		fdb_entry->lastuse = jiffies;
		return;
	}

	esw_debug(br_offloads->esw->dev,
		  "FDB update entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
		  fdb_info->addr, fdb_info->vid, vport_num);
}
1752
/* Flag an offloaded FDB entry as deleted by the kernel bridge. Marked
 * entries are skipped by the refresh/aging pass in mlx5_esw_bridge_update().
 */
void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *fdb_entry;
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return;

	fdb_entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (fdb_entry) {
		fdb_entry->flags |= MLX5_ESW_BRIDGE_FLAG_DELETED;
		return;
	}

	esw_debug(br_offloads->esw->dev,
		  "FDB mark deleted entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
		  fdb_info->addr, fdb_info->vid, vport_num);
}
1774
/* Offload a new FDB entry for the given port/MAC/vid and notify switchdev
 * about the result: user-added entries are reported as offloaded, while
 * dynamically learned local entries are taken over with ADD_TO_BRIDGE so
 * the kernel bridge does not age them out. Peer entries generate no
 * notification. Creation failures are silently ignored.
 */
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *fdb_entry;
	struct mlx5_esw_bridge_port *bridge_port;

	bridge_port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge_port)
		return;

	fdb_entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id,
						   fdb_info->addr, fdb_info->vid,
						   fdb_info->added_by_user,
						   bridge_port->flags &
						   MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
						   br_offloads->esw, bridge_port->bridge);
	if (IS_ERR(fdb_entry))
		return;

	if (fdb_entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER) {
		mlx5_esw_bridge_fdb_offload_notify(dev, fdb_entry->key.addr, fdb_entry->key.vid,
						   SWITCHDEV_FDB_OFFLOADED);
		return;
	}
	if (fdb_entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER)
		return;

	/* Take over dynamic entries to prevent kernel bridge from aging them out. */
	mlx5_esw_bridge_fdb_offload_notify(dev, fdb_entry->key.addr, fdb_entry->key.vid,
					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1803
/* Remove the offloaded FDB entry matching a switchdev delete notification,
 * sending the appropriate notification as part of teardown. Missing bridge
 * or entry is not an error — only a debug message is logged.
 */
void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *fdb_entry;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return;

	fdb_entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (fdb_entry) {
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(fdb_entry, bridge);
		return;
	}

	esw_debug(esw->dev,
		  "FDB remove entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
		  fdb_info->addr, fdb_info->vid, vport_num);
}
1826
/* Periodic FDB maintenance pass over all offloaded bridges: refresh entries
 * whose hardware ingress counter shows recent traffic, and age out local
 * entries idle for longer than the bridge ageing time. Entries added by the
 * user or already marked deleted are left untouched; peer entries are never
 * aged here (their lifetime is driven by the owning eswitch).
 */
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
	struct mlx5_esw_bridge *bridge;

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		/* _safe variant: refresh/age below may unlink the entry. */
		list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
			/* Last time hardware saw a packet hit this entry. */
			unsigned long lastuse =
				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);

			if (entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER |
					    MLX5_ESW_BRIDGE_FLAG_DELETED))
				continue;

			if (time_after(lastuse, entry->lastuse))
				/* Traffic observed since last check — keep alive. */
				mlx5_esw_bridge_fdb_entry_refresh(entry);
			else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
				 time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
				/* Idle past the ageing deadline — expire it. */
				mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
		}
	}
}
1849
/* Attach a multicast group (MDB) entry to a bridge port. With VLAN filtering
 * enabled and a non-zero vid, the VLAN must already exist on the port since
 * its metadata is required for match creation.
 *
 * Returns 0 on success or a negative errno, with extack populated on error.
 *
 * Fix: dropped the trailing "\n" from the extack messages — netlink extack
 * strings are delivered verbatim to userspace and must not end with a
 * newline (kernel-log esw_warn messages keep theirs).
 */
int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				 const unsigned char *addr, u16 vid,
				 struct mlx5_esw_bridge_offloads *br_offloads,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_vlan *vlan;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;
	int err;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
			 addr, vport_num);
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)",
				       addr, vport_num);
		return -EINVAL;
	}

	bridge = port->bridge;
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
		if (!vlan) {
			esw_warn(br_offloads->esw->dev,
				 "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
				 addr, vid, vport_num);
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)",
					       addr, vid, vport_num);
			return -EINVAL;
		}
	}

	err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
	if (err) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
				       addr, vid, vport_num);
		return err;
	}

	return 0;
}
1894
/* Detach a multicast group (MDB) entry from a bridge port. Does nothing if
 * the port is not attached to any bridge.
 */
void mlx5_esw_bridge_port_mdb_del(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				  const unsigned char *addr, u16 vid,
				  struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *bridge_port;

	bridge_port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (bridge_port)
		mlx5_esw_bridge_port_mdb_detach(dev, bridge_port, addr, vid);
}
1907
mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads * br_offloads)1908 static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
1909 {
1910 struct mlx5_esw_bridge_port *port;
1911 unsigned long i;
1912
1913 xa_for_each(&br_offloads->ports, i, port)
1914 mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1915
1916 WARN_ONCE(!list_empty(&br_offloads->bridges),
1917 "Cleaning up bridge offloads while still having bridges attached\n");
1918 }
1919
mlx5_esw_bridge_init(struct mlx5_eswitch * esw)1920 struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
1921 {
1922 struct mlx5_esw_bridge_offloads *br_offloads;
1923
1924 ASSERT_RTNL();
1925
1926 br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
1927 if (!br_offloads)
1928 return ERR_PTR(-ENOMEM);
1929
1930 INIT_LIST_HEAD(&br_offloads->bridges);
1931 xa_init(&br_offloads->ports);
1932 br_offloads->esw = esw;
1933 esw->br_offloads = br_offloads;
1934 mlx5_esw_bridge_debugfs_offloads_init(br_offloads);
1935
1936 return br_offloads;
1937 }
1938
mlx5_esw_bridge_cleanup(struct mlx5_eswitch * esw)1939 void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
1940 {
1941 struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
1942
1943 ASSERT_RTNL();
1944
1945 if (!br_offloads)
1946 return;
1947
1948 mlx5_esw_bridge_flush(br_offloads);
1949 WARN_ON(!xa_empty(&br_offloads->ports));
1950 mlx5_esw_bridge_debugfs_offloads_cleanup(br_offloads);
1951
1952 esw->br_offloads = NULL;
1953 kvfree(br_offloads);
1954 }
1955