// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */

#include "internal.h"

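/* Dynamic reparse lets the device reparse packets only for the specific
 * actions that need it, rather than after every action. Returns true
 * when the queried FW caps advertise reparse-by-STC support.
 */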
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
{
	return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
}

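/* Pick the RTC reparse mode for this device: NEVER when dynamic
 * (per-action) reparse is available, ALWAYS otherwise.
 */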
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
{
	/* Prefer dynamic reparse: only the actions that need it are reparsed */
	if (mlx5hws_context_cap_dynamic_reparse(ctx))
		return MLX5_IFC_RTC_REPARSE_NEVER;

	/* Otherwise fall back to the less efficient static reparse */
	return MLX5_IFC_RTC_REPARSE_ALWAYS;
}

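/* Set up the per-context caches and pools: the action pattern cache,
 * the definer cache, and the STC pool. The STC pool size is
 * MLX5HWS_POOL_STC_LOG_SZ clamped to the device maximum, but never
 * below the allocation granularity. On failure, everything already
 * initialized is torn down again.
 */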
static int hws_context_pools_init(struct mlx5hws_context *ctx)
{
	struct mlx5hws_pool_attr pool_attr = {0};
	u8 max_log_sz;
	int ret;

	ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
	if (ret)
		return ret;

	ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
	if (ret)
		goto uninit_pat_cache;

	/* Create the STC pool (FDB table type only) */
	pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
	pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
	max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);

	pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
	ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr);
	if (!ctx->stc_pool) {
		mlx5hws_err(ctx, "Failed to allocate STC pool\n");
		ret = -ENOMEM;
		goto uninit_cache;
	}

	return 0;

uninit_cache:
	mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
	mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
	return ret;
}

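/* Tear down what hws_context_pools_init() set up, in reverse order.
 * The STC pool check keeps this safe on a partially initialized context.
 */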
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
{
	if (ctx->stc_pool)
		mlx5hws_pool_destroy(ctx->stc_pool);

	mlx5hws_definer_uninit_cache(ctx->definer_cache);
	mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
}

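/* Allocate a protection domain for this context and flag that we own
 * it, so hws_context_uninit_pd() only releases a PD we allocated.
 */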
static int hws_context_init_pd(struct mlx5hws_context *ctx)
{
	int ret;

	ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
	if (ret) {
		mlx5hws_err(ctx, "Failed to allocate PD\n");
		return ret;
	}

	ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;

	return 0;
}

static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
{
	if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
		mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);

	return 0;
}

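/* Probe the queried FW caps for every feature HWS depends on: WQE
 * based rule insertion, eswitch manager, reparse on the relevant table
 * types, the 8DW STE format, by-hash and by-offset RTC update modes,
 * and SELECT definer support. The HWS_SUPPORT flag is set only when
 * all of them are present; otherwise the first missing cap is logged
 * and the flag stays clear.
 */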
static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
{
	struct mlx5hws_cmd_query_caps *caps = ctx->caps;

	/* HWS is not supported on this device / FW */
	if (!caps->wqe_based_update) {
		mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
		return;
	}

	if (!caps->eswitch_manager) {
		mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
		return;
	}

	/* The current solution requires all rules to set the reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
		return;
	}

	/* FW/HW must support the 8DW STE format */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		mlx5hws_err(ctx, "Required HWS STE format not supported\n");
		return;
	}

	/* Adding rules both by hash and by offset is required */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
		return;
	}

	ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
}

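/* Bring up the HWS side of the context: verify device support, then
 * allocate the PD, the pools, and the send queues. A context without
 * HWS support is not an error; it simply stays without the support
 * flags set.
 */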
static int hws_context_init_hws(struct mlx5hws_context *ctx,
				struct mlx5hws_context_attr *attr)
{
	int ret;

	hws_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = hws_context_init_pd(ctx);
	if (ret)
		return ret;

	ret = hws_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	/* The context supports the backward compatible API, but not the
	 * native HWS API.
	 */
	ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;

	ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	INIT_LIST_HEAD(&ctx->tbl_list);

	return 0;

pools_uninit:
	hws_context_pools_uninit(ctx);
uninit_pd:
	hws_context_uninit_pd(ctx);
	return ret;
}

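/* Undo hws_context_init_hws(). A no-op when HWS support was never
 * established, so it is safe to call on any context.
 */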
static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
{
	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5hws_send_queues_close(ctx);
	hws_context_pools_uninit(ctx);
	hws_context_uninit_pd(ctx);
}

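/* Allocate and initialize an HWS context on @mdev: query FW caps, set
 * up vports, and, when the device supports it, the HWS machinery.
 * Returns the new context, or NULL on any failure.
 *
 * Example caller (sketch only; assumes an already probed mdev, and the
 * queue count and size shown are illustrative):
 *
 *	struct mlx5hws_context_attr attr = {
 *		.queues = 16,
 *		.queue_size = 256,
 *	};
 *	struct mlx5hws_context *ctx;
 *
 *	ctx = mlx5hws_context_open(mdev, &attr);
 *	if (!ctx)
 *		return -ENOMEM;
 *	...
 *	mlx5hws_context_close(ctx);
 */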
struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
					     struct mlx5hws_context_attr *attr)
{
	struct mlx5hws_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->mdev = mdev;

	mutex_init(&ctx->ctrl_lock);
	xa_init(&ctx->peer_ctx_xa);

	ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
	if (ret)
		goto free_caps;

	ret = mlx5hws_vport_init_vports(ctx);
	if (ret)
		goto free_caps;

	ret = hws_context_init_hws(ctx, attr);
	if (ret)
		goto uninit_vports;

	mlx5hws_debug_init_dump(ctx);

	return ctx;

uninit_vports:
	mlx5hws_vport_uninit_vports(ctx);
free_caps:
	kfree(ctx->caps);
free_ctx:
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return NULL;
}

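/* Release everything mlx5hws_context_open() created, in reverse order
 * of initialization. Always returns 0.
 */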
int mlx5hws_context_close(struct mlx5hws_context *ctx)
{
	mlx5hws_debug_uninit_dump(ctx);
	hws_context_uninit_hws(ctx);
	mlx5hws_vport_uninit_vports(ctx);
	kfree(ctx->caps);
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return 0;
}

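/* Record @peer_ctx under @peer_vhca_id in this context's peer xarray,
 * under the control lock. A store failure is only logged; the entry
 * simply stays unset.
 */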
void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
			      struct mlx5hws_context *peer_ctx,
			      u16 peer_vhca_id)
{
	mutex_lock(&ctx->ctrl_lock);

	if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
		pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");

	mutex_unlock(&ctx->ctrl_lock);
}