// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */

#include <mlx5_core.h>
#include "fs_hws_pools.h"

#define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536
#define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10

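/*
 * Bulk reformat action for decapsulating an L3 tunnel back to L2. Two
 * reformat headers are registered: untagged (MAC only) and VLAN tagged.
 */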
static struct mlx5hws_action *
mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx)
{
	struct mlx5hws_action_reformat_header reformat_hdr[2] = {};
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
	enum mlx5hws_action_type reformat_type;
	u32 log_bulk_size;

	reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
	reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN;
	reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN;

	log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
	return mlx5hws_action_create_reformat(ctx, reformat_type, 2,
					      reformat_hdr, log_bulk_size, flags);
}

static struct mlx5hws_action *
mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
{
	struct mlx5hws_action_reformat_header reformat_hdr = {};
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
	enum mlx5hws_action_type reformat_type;
	u32 log_bulk_size;

	reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
	reformat_hdr.sz = data_size;

	log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
	return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
					      &reformat_hdr, log_bulk_size, flags);
}

static struct mlx5hws_action *
mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
{
	struct mlx5hws_action_reformat_header reformat_hdr = {};
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
	enum mlx5hws_action_type reformat_type;
	u32 log_bulk_size;

	reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
	reformat_hdr.sz = data_size;

	log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
	return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
					      &reformat_hdr, log_bulk_size, flags);
}

static struct mlx5hws_action *
mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx)
{
	struct mlx5hws_action_insert_header insert_hdr = {};
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
	u32 log_bulk_size;

	log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
	insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE;
	insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR;
	insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET;

	return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr,
						   log_bulk_size, flags);
}

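/*
 * Create the single HWS action that backs a whole bulk of packet reformat
 * entries. The action type is taken from the pool context; NULL is returned
 * if the FDB namespace is not in HW-managed (HMFS) steering mode or the
 * reformat type is not supported.
 */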
static struct mlx5hws_action *
mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev,
			      struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5hws_context *ctx;
	size_t encap_data_size;

	root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
		return NULL;

	ctx = root_ns->fs_hws_context.hws_ctx;
	if (!ctx)
		return NULL;

	encap_data_size = pr_pool_ctx->encap_data_size;
	switch (pr_pool_ctx->reformat_type) {
	case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
		return mlx5_fs_dl3tnltol2_bulk_action_create(ctx);
	case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
		return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size);
	case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
		return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size);
	case MLX5HWS_ACTION_TYP_INSERT_HEADER:
		return mlx5_fs_insert_hdr_bulk_action_create(ctx);
	default:
		return NULL;
	}
	return NULL;
}

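/*
 * Allocate a bulk of MLX5_FS_HWS_DEFAULT_BULK_LEN packet reformat entries.
 * Each entry records its owning bulk and its offset so that an individual
 * reformat can later be resolved back to the shared HWS action.
 */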
static struct mlx5_fs_bulk *
mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
{
	struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
	struct mlx5_fs_hws_pr_bulk *pr_bulk;
	int bulk_len;
	int i;

	if (!pool_ctx)
		return NULL;
	pr_pool_ctx = pool_ctx;
	bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
	pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, bulk_len), GFP_KERNEL);
	if (!pr_bulk)
		return NULL;

	if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
		goto free_pr_bulk;

	for (i = 0; i < bulk_len; i++) {
		pr_bulk->prs_data[i].bulk = pr_bulk;
		pr_bulk->prs_data[i].offset = i;
	}

	pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx);
	if (!pr_bulk->hws_action)
		goto cleanup_fs_bulk;

	return &pr_bulk->fs_bulk;

cleanup_fs_bulk:
	mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk);
free_pr_bulk:
	kvfree(pr_bulk);
	return NULL;
}

static int
mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
	struct mlx5_fs_hws_pr_bulk *pr_bulk;

	pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk);
	if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
		mlx5_core_err(dev, "Freeing bulk before all reformats were released\n");
		return -EBUSY;
	}

	mlx5hws_action_destroy(pr_bulk->hws_action);
	mlx5_fs_bulk_cleanup(fs_bulk);
	kvfree(pr_bulk);

	return 0;
}

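/*
 * Scale the pool threshold with usage: one tenth of the used units, capped
 * at MLX5_FS_HWS_POOL_MAX_THRESHOLD.
 */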
static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool)
{
	hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD,
				    hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO);
}

static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = {
	.bulk_create = mlx5_fs_hws_pr_bulk_create,
	.bulk_destroy = mlx5_fs_hws_pr_bulk_destroy,
	.update_threshold = mlx5_hws_pool_update_threshold,
};

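/*
 * Initialize a packet reformat pool for one of the supported reformat or
 * insert-header action types. The encap data size is recorded in the pool
 * context and used when the bulk action is created.
 */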
int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
			     struct mlx5_core_dev *dev, size_t encap_data_size,
			     enum mlx5hws_action_type reformat_type)
{
	struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;

	if (reformat_type != MLX5HWS_ACTION_TYP_INSERT_HEADER &&
	    reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 &&
	    reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 &&
	    reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2)
		return -EOPNOTSUPP;

	pr_pool_ctx = kzalloc(sizeof(*pr_pool_ctx), GFP_KERNEL);
	if (!pr_pool_ctx)
		return -ENOMEM;
	pr_pool_ctx->reformat_type = reformat_type;
	pr_pool_ctx->encap_data_size = encap_data_size;
	mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx);
	return 0;
}

void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool)
{
	struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;

	mlx5_fs_pool_cleanup(pr_pool);
	pr_pool_ctx = pr_pool->pool_ctx;
	if (!pr_pool_ctx)
		return;
	kfree(pr_pool_ctx);
}

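/*
 * Acquire a free packet reformat entry from the pool via
 * mlx5_fs_pool_acquire_index(). Returns an ERR_PTR on failure.
 */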
struct mlx5_fs_hws_pr *
mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool)
{
	struct mlx5_fs_pool_index pool_index = {};
	struct mlx5_fs_hws_pr_bulk *pr_bulk;
	int err;

	err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index);
	if (err)
		return ERR_PTR(err);
	pr_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk,
			       fs_bulk);
	return &pr_bulk->prs_data[pool_index.index];
}

void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
				    struct mlx5_fs_hws_pr *pr_data)
{
	struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;
	struct mlx5_fs_pool_index pool_index = {};
	struct mlx5_core_dev *dev = pr_pool->dev;

	pool_index.fs_bulk = fs_bulk;
	pool_index.index = pr_data->offset;
	if (mlx5_fs_pool_release_index(pr_pool, &pool_index))
		mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n");
}

struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
{
	return pr_data->bulk->hws_action;
}

static struct mlx5hws_action *
mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx,
			      struct mlx5hws_action_mh_pattern *pattern)
{
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
	u32 log_bulk_size;

	log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
	return mlx5hws_action_create_modify_header(ctx, 1, pattern,
						   log_bulk_size, flags);
}

static struct mlx5_fs_bulk *
mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
{
	struct mlx5hws_action_mh_pattern *pattern;
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_fs_hws_mh_bulk *mh_bulk;
	struct mlx5hws_context *ctx;
	int bulk_len;

	if (!pool_ctx)
		return NULL;

	root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
		return NULL;

	ctx = root_ns->fs_hws_context.hws_ctx;
	if (!ctx)
		return NULL;

	pattern = pool_ctx;
	bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
	mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL);
	if (!mh_bulk)
		return NULL;

	if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
		goto free_mh_bulk;

	for (int i = 0; i < bulk_len; i++) {
		mh_bulk->mhs_data[i].bulk = mh_bulk;
		mh_bulk->mhs_data[i].offset = i;
	}

	mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern);
	if (!mh_bulk->hws_action)
		goto cleanup_fs_bulk;

	return &mh_bulk->fs_bulk;

cleanup_fs_bulk:
	mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
free_mh_bulk:
	kvfree(mh_bulk);
	return NULL;
}

static int
mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
			    struct mlx5_fs_bulk *fs_bulk)
{
	struct mlx5_fs_hws_mh_bulk *mh_bulk;

	mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
	if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
		mlx5_core_err(dev, "Freeing bulk before all modify headers were released\n");
		return -EBUSY;
	}

	mlx5hws_action_destroy(mh_bulk->hws_action);
	mlx5_fs_bulk_cleanup(fs_bulk);
	kvfree(mh_bulk);

	return 0;
}

static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
	.bulk_create = mlx5_fs_hws_mh_bulk_create,
	.bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
	.update_threshold = mlx5_hws_pool_update_threshold,
};

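/*
 * A modify-header pool is keyed by its pattern: the pattern data is copied
 * here so the pool keeps its own reference for later matching and cleanup.
 */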
int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
			     struct mlx5_core_dev *dev,
			     struct mlx5hws_action_mh_pattern *pattern)
{
	struct mlx5hws_action_mh_pattern *pool_pattern;

	pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL);
	if (!pool_pattern)
		return -ENOMEM;
	pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
	if (!pool_pattern->data) {
		kfree(pool_pattern);
		return -ENOMEM;
	}
	pool_pattern->sz = pattern->sz;
	mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
			  pool_pattern);
	return 0;
}

void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
{
	struct mlx5hws_action_mh_pattern *pool_pattern;

	mlx5_fs_pool_cleanup(fs_hws_mh_pool);
	pool_pattern = fs_hws_mh_pool->pool_ctx;
	if (!pool_pattern)
		return;
	kfree(pool_pattern->data);
	kfree(pool_pattern);
}

struct mlx5_fs_hws_mh *
mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
{
	struct mlx5_fs_pool_index pool_index = {};
	struct mlx5_fs_hws_mh_bulk *mh_bulk;
	int err;

	err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
	if (err)
		return ERR_PTR(err);
	mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
			       fs_bulk);
	return &mh_bulk->mhs_data[pool_index.index];
}

void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
				    struct mlx5_fs_hws_mh *mh_data)
{
	struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
	struct mlx5_fs_pool_index pool_index = {};
	struct mlx5_core_dev *dev = mh_pool->dev;

	pool_index.fs_bulk = fs_bulk;
	pool_index.index = mh_data->offset;
	if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
		mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n");
}

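/*
 * Check whether the given modify-header pattern matches the pattern this
 * pool was initialized with, comparing the size and then each action.
 */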
bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
			       struct mlx5hws_action_mh_pattern *pattern)
{
	struct mlx5hws_action_mh_pattern *pool_pattern;
	int num_actions, i;

	pool_pattern = mh_pool->pool_ctx;
	if (WARN_ON_ONCE(!pool_pattern))
		return false;

	if (pattern->sz != pool_pattern->sz)
		return false;
	num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
	for (i = 0; i < num_actions; i++) {
		if ((__force __be32)pattern->data[i] !=
		    (__force __be32)pool_pattern->data[i])
			return false;
	}
	return true;
}

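/*
 * Get (or lazily create) the shared HWS counter action of the counter's
 * bulk. The first caller creates the action under the bulk lock; later
 * callers only take a reference. Paired with mlx5_fc_put_hws_action().
 */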
struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
					      struct mlx5_fc *counter)
{
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
	struct mlx5_fc_bulk *fc_bulk = counter->bulk;
	struct mlx5_fc_bulk_hws_data *fc_bulk_hws;

	fc_bulk_hws = &fc_bulk->hws_data;
	/* try to avoid locking if not necessary */
	if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
		return fc_bulk_hws->hws_action;

	mutex_lock(&fc_bulk_hws->lock);
	if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
		mutex_unlock(&fc_bulk_hws->lock);
		return fc_bulk_hws->hws_action;
	}
	fc_bulk_hws->hws_action =
		mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
	if (!fc_bulk_hws->hws_action) {
		mutex_unlock(&fc_bulk_hws->lock);
		return NULL;
	}
	refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
	mutex_unlock(&fc_bulk_hws->lock);

	return fc_bulk_hws->hws_action;
}

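/*
 * Drop a reference on the bulk's shared counter action and destroy it when
 * the last user is gone.
 */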
void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
{
	struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;

	/* try to avoid locking if not necessary */
	if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
		return;

	mutex_lock(&fc_bulk_hws->lock);
	if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
		mutex_unlock(&fc_bulk_hws->lock);
		return;
	}
	mlx5hws_action_destroy(fc_bulk_hws->hws_action);
	fc_bulk_hws->hws_action = NULL;
	mutex_unlock(&fc_bulk_hws->lock);
}