// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */

#include <linux/netdevice.h>
#include "lag.h"

enum {
	MLX5_LAG_FT_LEVEL_TTC,
	MLX5_LAG_FT_LEVEL_INNER_TTC,
	MLX5_LAG_FT_LEVEL_DEFINER,
};

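/*
 * Create the single flow group that spans all @rules entries of a definer's
 * port selection table. The group matches on the supplied definer and is of
 * type HASH_SPLIT, i.e. the device picks an entry by hashing the definer
 * fields.
 */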
static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
			    struct mlx5_flow_definer *definer,
			    u8 rules)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_definer_id,
		 mlx5_get_match_definer_id(definer));
	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, in, end_flow_index, rules - 1);
	MLX5_SET(create_flow_group_in, in, group_type,
		 MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);

	fg = mlx5_create_flow_group(ft, in);
	kvfree(in);
	return fg;
}

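/*
 * Build one definer's port selection flow table: ports * buckets entries in
 * the PORT_SEL namespace, a single hash-split group, and one rule per
 * (port, bucket) slot. Each rule forwards to the uplink of the port given by
 * ports[idx] (a 1-based affinity), so a flow's hash result resolves to a
 * physical port. On failure, rules created so far are removed in reverse.
 */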
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
					  struct mlx5_lag_definer *lag_definer,
					  u8 *ports)
{
	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_namespace *ns;
	struct mlx5_core_dev *dev;
	int err, i, j, k, idx;

	if (first_idx < 0)
		return -EINVAL;

	dev = ldev->pf[first_idx].dev;
	ft_attr.max_fte = ldev->ports * ldev->buckets;
	ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
	if (!ns) {
		mlx5_core_warn(dev, "Failed to get port selection namespace\n");
		return -EOPNOTSUPP;
	}

	lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(lag_definer->ft)) {
		mlx5_core_warn(dev, "Failed to create port selection table\n");
		return PTR_ERR(lag_definer->ft);
	}

	lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
						      lag_definer->definer,
						      ft_attr.max_fte);
	if (IS_ERR(lag_definer->fg)) {
		err = PTR_ERR(lag_definer->fg);
		goto destroy_ft;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	mlx5_ldev_for_each(i, 0, ldev) {
		for (j = 0; j < ldev->buckets; j++) {
			u8 affinity;

			idx = i * ldev->buckets + j;
			affinity = ports[idx];

			dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
							  vhca_id);
			lag_definer->rules[idx] = mlx5_add_flow_rules(lag_definer->ft,
								      NULL, &flow_act,
								      &dest, 1);
			if (IS_ERR(lag_definer->rules[idx])) {
				err = PTR_ERR(lag_definer->rules[idx]);
				mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
					while (j--) {
						idx = k * ldev->buckets + j;
						mlx5_del_flow_rules(lag_definer->rules[idx]);
					}
					j = ldev->buckets;
				}
				goto destroy_fg;
			}
		}
	}

	return 0;

destroy_fg:
	mlx5_destroy_flow_group(lag_definer->fg);
destroy_ft:
	mlx5_destroy_flow_table(lag_definer->ft);
	return err;
}

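/*
 * Fill @match_definer_mask for hashing on the inner (post-decap) headers of
 * traffic type @tt and return the match definer format id used: format 23
 * for IPv4/L4 and non-IP traffic, 31 for IPv6 with L4 ports, 32 for plain
 * IPv6.
 */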
static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
				      enum mlx5_traffic_types tt)
{
	int format_id;
	u8 *ipv6;

	switch (tt) {
	case MLX5_TT_IPV4_UDP:
	case MLX5_TT_IPV4_TCP:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l4_dport);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_dest_addr);
		break;
	case MLX5_TT_IPV4:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_dest_addr);
		break;
	case MLX5_TT_IPV6_TCP:
	case MLX5_TT_IPV6_UDP:
		format_id = 31;
		MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
				 inner_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
				 inner_l4_dport);
		ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
				    inner_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
				    inner_ip_src_addr);
		memset(ipv6, 0xff, 16);
		break;
	case MLX5_TT_IPV6:
		format_id = 32;
		ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
				    inner_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
				    inner_ip_src_addr);
		memset(ipv6, 0xff, 16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_smac_15_0);
		break;
	default:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_15_0);
		break;
	}

	return format_id;
}

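/*
 * Outer-header counterpart of mlx5_lag_set_definer_inner(). Tunneled hash
 * modes are delegated to the inner helper; otherwise the mask selects outer
 * L3/L4 fields (formats 22, 29 and 30) or, in the default case, format 0
 * with source MAC plus either the first VLAN ID (NETDEV_LAG_HASH_VLAN_SRCMAC)
 * or ethertype and destination MAC.
 */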
static int mlx5_lag_set_definer(u32 *match_definer_mask,
				enum mlx5_traffic_types tt, bool tunnel,
				enum netdev_lag_hash hash)
{
	int format_id;
	u8 *ipv6;

	if (tunnel)
		return mlx5_lag_set_definer_inner(match_definer_mask, tt);

	switch (tt) {
	case MLX5_TT_IPV4_UDP:
	case MLX5_TT_IPV4_TCP:
		format_id = 22;
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l4_dport);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_dest_addr);
		break;
	case MLX5_TT_IPV4:
		format_id = 22;
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_smac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_dest_addr);
		break;
	case MLX5_TT_IPV6_TCP:
	case MLX5_TT_IPV6_UDP:
		format_id = 29;
		MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
				 outer_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
				 outer_l4_dport);
		ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
				    outer_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
				    outer_ip_src_addr);
		memset(ipv6, 0xff, 16);
		break;
	case MLX5_TT_IPV6:
		format_id = 30;
		ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
				    outer_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
				    outer_ip_src_addr);
		memset(ipv6, 0xff, 16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_smac_15_0);
		break;
	default:
		format_id = 0;
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_smac_15_0);

		if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
			MLX5_SET_TO_ONES(match_definer_format_0,
					 match_definer_mask,
					 outer_first_vlan_vid);
			break;
		}

		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_ethertype);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_dmac_15_0);
		break;
	}

	return format_id;
}

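/*
 * Allocate a mlx5_lag_definer for one traffic type: build the match mask,
 * create the firmware match definer in the PORT_SEL namespace of the first
 * PF, then create the hash flow table and its per-port/bucket rules.
 */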
static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
			enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_lag_definer *lag_definer;
	struct mlx5_core_dev *dev;
	u32 *match_definer_mask;
	int format_id, err;

	if (first_idx < 0)
		return ERR_PTR(-EINVAL);

	dev = ldev->pf[first_idx].dev;
	lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
	if (!lag_definer)
		return ERR_PTR(-ENOMEM);

	match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
							match_mask),
				      GFP_KERNEL);
	if (!match_definer_mask) {
		err = -ENOMEM;
		goto free_lag_definer;
	}

	format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
	lag_definer->definer =
		mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					  format_id, match_definer_mask);
	if (IS_ERR(lag_definer->definer)) {
		err = PTR_ERR(lag_definer->definer);
		goto free_mask;
	}

	err = mlx5_lag_create_port_sel_table(ldev, lag_definer, ports);
	if (err)
		goto destroy_match_definer;

	kvfree(match_definer_mask);

	return lag_definer;

destroy_match_definer:
	mlx5_destroy_match_definer(dev, lag_definer->definer);
free_mask:
	kvfree(match_definer_mask);
free_lag_definer:
	kfree(lag_definer);
	return ERR_PTR(err);
}

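/* Tear down a definer created by mlx5_lag_create_definer(), in reverse order. */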
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
				     struct mlx5_lag_definer *lag_definer)
{
	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_core_dev *dev;
	int idx, i, j;

	if (first_idx < 0)
		return;

	dev = ldev->pf[first_idx].dev;
	mlx5_ldev_for_each(i, first_idx, ldev) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			mlx5_del_flow_rules(lag_definer->rules[idx]);
		}
	}
	mlx5_destroy_flow_group(lag_definer->fg);
	mlx5_destroy_flow_table(lag_definer->ft);
	mlx5_destroy_match_definer(dev, lag_definer->definer);
	kfree(lag_definer);
}

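/*
 * Destroy the outer and, when tunnel hashing was enabled, inner definers of
 * every traffic type set in tt_map. NULL entries are skipped so this can
 * also unwind a partially completed mlx5_lag_create_definers().
 */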
static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int tt;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		if (port_sel->outer.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->outer.definers[tt]);
		if (port_sel->inner.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->inner.definers[tt]);
	}
}

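/*
 * Create an outer definer for every traffic type in tt_map and, when the
 * hash mode covers encapsulated traffic, an inner one as well. Partial
 * failures are unwound via mlx5_lag_destroy_definers().
 */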
static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
				    enum netdev_lag_hash hash_type,
				    u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_lag_definer *lag_definer;
	int tt, err;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
						      false, ports);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->outer.definers[tt] = lag_definer;

		if (!port_sel->tunnel)
			continue;

		lag_definer =
			mlx5_lag_create_definer(ldev, hash_type, tt,
						true, ports);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->inner.definers[tt] = lag_definer;
	}

	return 0;

destroy_definers:
	mlx5_lag_destroy_definers(ldev);
	return err;
}

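/*
 * Translate the bonding hash policy into the set of traffic types that need
 * a definer. The encap policies (E23/E34) also set port_sel->tunnel so that
 * inner headers are hashed; unknown policies fall back to MLX5_TT_ANY only.
 */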
static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
		       enum netdev_lag_hash hash)
{
	port_sel->tunnel = false;

	switch (hash) {
	case NETDEV_LAG_HASH_E34:
		port_sel->tunnel = true;
		fallthrough;
	case NETDEV_LAG_HASH_L34:
		set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV4, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6, port_sel->tt_map);
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	case NETDEV_LAG_HASH_E23:
		port_sel->tunnel = true;
		fallthrough;
	case NETDEV_LAG_HASH_L23:
		set_bit(MLX5_TT_IPV4, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6, port_sel->tt_map);
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	default:
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	}
}

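/*
 * Mark every traffic type without a definer (clear in tt_map) as ignored so
 * the TTC table is not given a destination for it.
 */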
#define SET_IGNORE_DESTS_BITS(tt_map, dests)				\
	do {								\
		int idx;						\
									\
		for_each_clear_bit(idx, tt_map, MLX5_NUM_TT)		\
			set_bit(idx, dests);				\
	} while (0)

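/*
 * Point each mapped traffic type of the inner TTC table at the flow table of
 * its inner definer; unmapped types are ignored.
 */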
static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
					  struct ttc_params *ttc_params)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_table_attr *ft_attr;
	int tt;

	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
	ft_attr = &ttc_params->ft_attr;
	ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params->dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
	}
	SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
}

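/*
 * Same as above for the outer TTC table. With tunnel hashing enabled, every
 * tunnel traffic type is additionally steered to the inner TTC table so the
 * inner headers are classified as well.
 */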
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
					  struct ttc_params *ttc_params)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_table_attr *ft_attr;
	int tt;

	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
	ft_attr = &ttc_params->ft_attr;
	ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params->dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
	}
	SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);

	ttc_params->inner_ttc = port_sel->tunnel;
	if (!port_sel->tunnel)
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(port_sel->inner.ttc);
	}
}

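/* Create the outer TTC classification table on the first PF. */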
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};
	struct mlx5_core_dev *dev;

	if (first_idx < 0)
		return -EINVAL;

	dev = ldev->pf[first_idx].dev;
	mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
	port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
	return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
}

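/* Create the inner TTC table; only used when tunnel hashing is enabled. */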
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};
	struct mlx5_core_dev *dev;

	if (first_idx < 0)
		return -EINVAL;

	dev = ldev->pf[first_idx].dev;
	mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
	port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
	return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
}

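/*
 * Set up hash-based port selection: compute the traffic type map for
 * @hash_type, create the definers with their steering tables, then the
 * inner (if needed) and outer TTC tables that feed them. On failure the
 * port_sel state is zeroed so a later create starts clean.
 */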
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
			     enum netdev_lag_hash hash_type, u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;

	set_tt_map(port_sel, hash_type);
	err = mlx5_lag_create_definers(ldev, hash_type, ports);
	if (err)
		goto clear_port_sel;

	if (port_sel->tunnel) {
		err = mlx5_lag_create_inner_ttc_table(ldev);
		if (err)
			goto destroy_definers;
	}

	err = mlx5_lag_create_ttc_table(ldev);
	if (err)
		goto destroy_inner;

	return 0;

destroy_inner:
	if (port_sel->tunnel)
		mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
	mlx5_lag_destroy_definers(ldev);
clear_port_sel:
	memset(port_sel, 0, sizeof(*port_sel));
	return err;
}

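/*
 * Re-point one definer's rules at the uplinks described by @ports, touching
 * only the (port, bucket) slots whose mapping differs from the cached
 * v2p_map.
 */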
static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
						   struct mlx5_lag_definer *def,
						   u8 *ports)
{
	struct mlx5_flow_destination dest = {};
	int idx;
	int err;
	int i;
	int j;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	mlx5_ldev_for_each(i, 0, ldev) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			if (ldev->v2p_map[idx] == ports[idx])
				continue;

			dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
							  vhca_id);
			err = mlx5_modify_rule_destination(def->rules[idx], &dest, NULL);
			if (err)
				return err;
		}
	}

	return 0;
}

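/* Apply the new port mapping to the definer of every mapped traffic type. */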
static int
mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
				      struct mlx5_lag_definer **definers,
				      u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;
	int tt;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		err = __mlx5_lag_modify_definers_destinations(ldev, definers[tt], ports);
		if (err)
			return err;
	}

	return 0;
}

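/*
 * Update the uplink destinations of the existing rules to a new port
 * mapping: outer definers first and, when tunnel hashing is active, the
 * inner definers as well.
 */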
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;

	err = mlx5_lag_modify_definers_destinations(ldev,
						    port_sel->outer.definers,
						    ports);
	if (err)
		return err;

	if (!port_sel->tunnel)
		return 0;

	return mlx5_lag_modify_definers_destinations(ldev,
						     port_sel->inner.definers,
						     ports);
}

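/* Undo mlx5_lag_port_sel_create(): TTC tables first, then the definers. */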
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;

	mlx5_destroy_ttc_table(port_sel->outer.ttc);
	if (port_sel->tunnel)
		mlx5_destroy_ttc_table(port_sel->inner.ttc);
	mlx5_lag_destroy_definers(ldev);
	memset(port_sel, 0, sizeof(*port_sel));
}