// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "internal.h"

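/* Decide whether an FDB rule should skip the RX and/or TX RTC, based on the
 * per-rule flow source or, if none was given, on the matcher attributes.
 */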
static void hws_rule_skip(struct mlx5hws_matcher *matcher,
			  struct mlx5hws_match_template *mt,
			  u32 flow_source,
			  bool *skip_rx, bool *skip_tx)
{
	/* By default FDB rules are added to both RX and TX */
	*skip_rx = false;
	*skip_tx = false;

	if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
		*skip_rx = true;
	} else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
		*skip_tx = true;
	} else {
		/* If no flow source was set for current rule,
		 * check for flow source in matcher attributes.
		 */
		if (matcher->attr.optimize_flow_src) {
			*skip_tx =
				matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
			*skip_rx =
				matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
			return;
		}
	}
}

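/* On rule update, copy the match tag that was saved at creation time into the
 * new WQE data segment, so the match STE is rewritten with the same tag.
 */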
static void
hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
			 struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
			 bool is_jumbo)
{
	struct mlx5hws_rule_match_tag *tag;

	if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
		tag = &rule->tag;
	} else {
		struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
			(struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
		tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
	}

	if (is_jumbo)
		memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
	else
		memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
}

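/* Fill the dependent WQE with the rule, user data and destination RTC IDs.
 * For FDB tables the RX/TX RTCs are set (or zeroed) according to the
 * flow-source based skip decision.
 */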
static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
				  struct mlx5hws_rule *rule,
				  struct mlx5hws_match_template *mt,
				  struct mlx5hws_rule_attr *attr)
{
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_table *tbl = matcher->tbl;
	bool skip_rx, skip_tx;

	dep_wqe->rule = rule;
	dep_wqe->user_data = attr->user_data;
	dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
				attr->rule_idx : 0;

	if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
		hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);

		if (!skip_rx) {
			dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
			dep_wqe->retry_rtc_0 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_0_id : 0;
		} else {
			dep_wqe->rtc_0 = 0;
			dep_wqe->retry_rtc_0 = 0;
		}

		if (!skip_tx) {
			dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
			dep_wqe->retry_rtc_1 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_1_id : 0;
		} else {
			dep_wqe->rtc_1 = 0;
			dep_wqe->retry_rtc_1 = 0;
		}
	} else {
		pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
	}
}

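/* During matcher resize, point the send attributes at the destination
 * matcher's RTCs (and collision-matcher retry RTCs) for the moved rule.
 */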
static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
				  struct mlx5hws_send_ste_attr *ste_attr)
{
	struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;

	if (rule->resize_info->rtc_0) {
		ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
		ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
					dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
	}
	if (rule->resize_info->rtc_1) {
		ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
		ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
					dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
	}
}

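/* Generate a completion on the queue for an operation that was not sent to
 * HW, updating the rule status to the given success status or to FAILED.
 */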
static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
			      struct mlx5hws_rule *rule,
			      bool err,
			      void *user_data,
			      enum mlx5hws_rule_status rule_status_on_succ)
{
	enum mlx5hws_flow_op_status comp_status;

	if (!err) {
		comp_status = MLX5HWS_FLOW_OP_SUCCESS;
		rule->status = rule_status_on_succ;
	} else {
		comp_status = MLX5HWS_FLOW_OP_ERROR;
		rule->status = MLX5HWS_RULE_STATUS_FAILED;
	}

	mlx5hws_send_engine_inc_rule(queue);
	mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
}

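/* For resizable matchers, keep a copy of the match WQE control and data
 * segments so the rule can later be rewritten into a resized matcher.
 */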
static void
hws_rule_save_resize_info(struct mlx5hws_rule *rule,
			  struct mlx5hws_send_ste_attr *ste_attr)
{
	if (!mlx5hws_matcher_is_resizable(rule->matcher))
		return;

	/* resize_info might already exist (if we're in an update flow) */
	if (likely(!rule->resize_info)) {
		rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
		if (unlikely(!rule->resize_info)) {
			pr_warn("HWS: resize info isn't allocated for rule\n");
			return;
		}
	}

	memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
	       sizeof(rule->resize_info->ctrl_seg));
	memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
	       sizeof(rule->resize_info->data_seg));
}

void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
{
	if (mlx5hws_matcher_is_resizable(rule->matcher) &&
	    rule->resize_info) {
		kfree(rule->resize_info);
		rule->resize_info = NULL;
	}
}

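/* For non-resizable matchers, save the match tag on the rule itself so it can
 * later be used to build the delete WQE. Resizable matchers keep the full
 * data segment in resize_info instead, so nothing is saved here for them.
 */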
static void
hws_rule_save_delete_info(struct mlx5hws_rule *rule,
			  struct mlx5hws_send_ste_attr *ste_attr)
{
	struct mlx5hws_match_template *mt = rule->matcher->mt;
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);

	if (mlx5hws_matcher_is_resizable(rule->matcher))
		return;

	if (is_jumbo)
		memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
	else
		memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
}

static void
hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
{
	/* nothing to do here */
}

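/* Point the delete WQE at the saved match tag: the rule's own tag for
 * non-resizable matchers, or the tag embedded in the saved data segment
 * for resizable ones.
 */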
static void
hws_rule_load_delete_info(struct mlx5hws_rule *rule,
			  struct mlx5hws_send_ste_attr *ste_attr)
{
	if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
		ste_attr->wqe_tag = &rule->tag;
	} else {
		struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
			(struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
		struct mlx5hws_rule_match_tag *tag =
			(struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
		ste_attr->wqe_tag = tag;
	}
}

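/* Allocate a chunk of action STEs from the matcher's action STE pool and
 * record the pool, size and base index on the rule.
 */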
static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule)
{
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_matcher_action_ste *action_ste;
	struct mlx5hws_pool_chunk ste = {0};
	int ret;

	action_ste = &matcher->action_ste;
	ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
	ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
	if (unlikely(ret)) {
		mlx5hws_err(matcher->tbl->ctx,
			    "Failed to allocate STE for rule actions");
		return ret;
	}

	rule->action_ste.pool = matcher->action_ste.pool;
	rule->action_ste.num_stes = matcher->action_ste.max_stes;
	rule->action_ste.index = ste.offset;

	return 0;
}

void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste)
{
	struct mlx5hws_pool_chunk ste = {0};

	if (!action_ste->num_stes)
		return;

	ste.order = ilog2(roundup_pow_of_two(action_ste->num_stes));
	ste.offset = action_ste->index;

	/* This release is safe only when the rule match STE was deleted
	 * (when the rule is being deleted) or replaced with the new STE that
	 * isn't pointing to old action STEs (when the rule is being updated).
	 */
	mlx5hws_pool_chunk_free(action_ste->pool, &ste);
}

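/* Prepare the rule, the default send STE attributes and the action apply
 * data before a create or update operation is queued.
 */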
static void hws_rule_create_init(struct mlx5hws_rule *rule,
				 struct mlx5hws_send_ste_attr *ste_attr,
				 struct mlx5hws_actions_apply_data *apply,
				 bool is_update)
{
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_table *tbl = matcher->tbl;
	struct mlx5hws_context *ctx = tbl->ctx;

	/* Init rule before reuse */
	if (!is_update) {
		/* In update we reuse these RTCs */
		rule->rtc_0 = 0;
		rule->rtc_1 = 0;

		rule->action_ste.pool = NULL;
		rule->action_ste.num_stes = 0;
		rule->action_ste.index = -1;

		rule->status = MLX5HWS_RULE_STATUS_CREATING;
	} else {
		rule->status = MLX5HWS_RULE_STATUS_UPDATING;
	}

	/* Initialize the old action STE info - shallow-copy action_ste.
	 * In create flow this will set old_action_ste fields to initial values.
	 * In update flow this will save the existing action STE info,
	 * so that we will later use it to free old STEs.
	 */
	rule->old_action_ste = rule->action_ste;

	rule->pending_wqes = 0;

	/* Init default send STE attributes */
	ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
	ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;

	/* Init default action apply */
	apply->tbl_type = tbl->type;
	apply->common_res = &ctx->common_res;
	apply->jump_to_action_stc = matcher->action_ste.stc.offset;
	apply->require_dep = 0;
}

static void hws_rule_move_init(struct mlx5hws_rule *rule,
			       struct mlx5hws_rule_attr *attr)
{
	/* Save the old RTC IDs to be later used in match STE delete */
	rule->resize_info->rtc_0 = rule->rtc_0;
	rule->resize_info->rtc_1 = rule->rtc_1;
	rule->resize_info->rule_idx = attr->rule_idx;

	rule->rtc_0 = 0;
	rule->rtc_1 = 0;

	rule->pending_wqes = 0;
	rule->status = MLX5HWS_RULE_STATUS_CREATING;
	rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
}

bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
{
	return mlx5hws_matcher_is_in_resize(rule->matcher) &&
	       rule->resize_info &&
	       rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
}

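/* Write the rule to HW: queue a dependent match WQE, allocate action STEs if
 * the action template needs more than the match STE, apply the action setters
 * from the last STE to the first, and either keep the match WQE as a
 * dependency or send it immediately when the rule has no dependent writes.
 */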
static int hws_rule_create_hws(struct mlx5hws_rule *rule,
			       struct mlx5hws_rule_attr *attr,
			       u8 mt_idx,
			       u32 *match_param,
			       u8 at_idx,
			       struct mlx5hws_rule_action rule_actions[])
{
	struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_context *ctx = matcher->tbl->ctx;
	struct mlx5hws_send_ste_attr ste_attr = {0};
	struct mlx5hws_send_ring_dep_wqe *dep_wqe;
	struct mlx5hws_actions_wqe_setter *setter;
	struct mlx5hws_actions_apply_data apply;
	struct mlx5hws_send_engine *queue;
	u8 total_stes, action_stes;
	bool is_update;
	int i, ret;

	is_update = !match_param;

	setter = &at->setters[at->num_of_action_stes];
	total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
	action_stes = total_stes - 1;

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5hws_send_engine_err(queue)))
		return -EIO;

	hws_rule_create_init(rule, &ste_attr, &apply, is_update);

	/* Allocate dependent match WQE since rule might have dependent writes.
	 * The queued dependent WQE can be later aborted or kept as a dependency.
	 * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
	 */
	dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
	hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);

	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
	ste_attr.wqe_data = &dep_wqe->wqe_data;
	apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
	apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
	apply.rule_action = rule_actions;
	apply.queue = queue;

	if (action_stes) {
		/* Allocate action STEs for rules that need more than match STE */
		ret = hws_rule_alloc_action_ste(rule);
		if (ret) {
			mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
			mlx5hws_send_abort_new_dep_wqe(queue);
			return ret;
		}
		/* Skip RX/TX based on the dep_wqe init */
		ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0_id : 0;
		ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1_id : 0;
		/* Action STEs are written to a specific index last to first */
		ste_attr.direct_index = rule->action_ste.index + action_stes;
		apply.next_direct_idx = ste_attr.direct_index;
	} else {
		apply.next_direct_idx = 0;
	}

	for (i = total_stes; i-- > 0;) {
		mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);

		if (i == 0) {
			/* Handle last match STE.
			 * For hash split / linear lookup RTCs, packets reaching any STE
			 * will always match and perform the specified actions, which
			 * makes the tag irrelevant.
			 */
			if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
				mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
							   (u8 *)dep_wqe->wqe_data.action);
			else if (is_update)
				hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);

			/* Rule has dependent WQEs, match dep_wqe is queued */
			if (action_stes || apply.require_dep)
				break;

			/* Rule has no dependencies, abort dep_wqe and send WQE now */
			mlx5hws_send_abort_new_dep_wqe(queue);
			ste_attr.wqe_tag_is_jumbo = is_jumbo;
			ste_attr.send_attr.notify_hw = !attr->burst;
			ste_attr.send_attr.user_data = dep_wqe->user_data;
			ste_attr.send_attr.rule = dep_wqe->rule;
			ste_attr.rtc_0 = dep_wqe->rtc_0;
			ste_attr.rtc_1 = dep_wqe->rtc_1;
			ste_attr.used_id_rtc_0 = &rule->rtc_0;
			ste_attr.used_id_rtc_1 = &rule->rtc_1;
			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
			ste_attr.direct_index = dep_wqe->direct_index;
		} else {
			apply.next_direct_idx = --ste_attr.direct_index;
		}

		mlx5hws_send_ste(queue, &ste_attr);
	}

	/* Backup TAG on the rule for deletion and resize info for
	 * moving rules to a new matcher, only after insertion.
	 */
	if (!is_update)
		hws_rule_save_delete_info(rule, &ste_attr);

	hws_rule_save_resize_info(rule, &ste_attr);
	mlx5hws_send_engine_inc_rule(queue);

	if (!attr->burst)
		mlx5hws_send_all_dep_wqe(queue);

	return 0;
}

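/* Complete the destroy of a rule whose queue hit an error or whose insertion
 * failed: generate a DELETED completion, release current and old action STEs,
 * and clear the saved delete/resize info. No delete WQE is sent to HW.
 */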
static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
					struct mlx5hws_rule_attr *attr)
{
	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5hws_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	hws_rule_gen_comp(queue, rule, false,
			  attr->user_data, MLX5HWS_RULE_STATUS_DELETED);

	/* Rule failed, now we can safely release action STEs */
	mlx5hws_rule_free_action_ste(&rule->action_ste);

	/* Perhaps the rule failed updating - release old action STEs as well */
	mlx5hws_rule_free_action_ste(&rule->old_action_ste);

	/* Clear complex tag */
	hws_rule_clear_delete_info(rule);

	/* Clear info that was saved for resizing */
	mlx5hws_rule_clear_resize_info(rule);

	/* If a rule that was indicated as burst (need to trigger HW) has failed
	 * insertion, we won't ring the HW as nothing is being written to the WQ.
	 * In such case update the last WQE and ring the HW with that work.
	 */
	if (attr->burst)
		return;

	mlx5hws_send_all_dep_wqe(queue);
	mlx5hws_send_engine_flush_queue(queue);
}

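/* Queue a delete WQE that deactivates the rule's match STE. Falls back to the
 * failed-destroy path on queue errors or failed rules, and only generates a
 * completion (without touching HW) when the rule is marked skip_delete.
 */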
static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
				struct mlx5hws_rule_attr *attr)
{
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
	struct mlx5hws_send_ste_attr ste_attr = {0};
	struct mlx5hws_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	if (unlikely(mlx5hws_send_engine_err(queue))) {
		hws_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	/* Rule is not completed yet */
	if (rule->status == MLX5HWS_RULE_STATUS_CREATING ||
	    rule->status == MLX5HWS_RULE_STATUS_UPDATING)
		return -EBUSY;

	/* Rule failed and doesn't require cleanup */
	if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
		hws_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	if (rule->skip_delete) {
		/* Rule shouldn't be deleted in HW.
		 * Generate completion as if write succeeded, and we can
		 * safely release action STEs and clear resize info.
		 */
		hws_rule_gen_comp(queue, rule, false,
				  attr->user_data, MLX5HWS_RULE_STATUS_DELETED);

		mlx5hws_rule_free_action_ste(&rule->action_ste);
		mlx5hws_rule_clear_resize_info(rule);
		return 0;
	}

	mlx5hws_send_engine_inc_rule(queue);

	/* Send dependent WQE */
	if (!attr->burst)
		mlx5hws_send_all_dep_wqe(queue);

	rule->status = MLX5HWS_RULE_STATUS_DELETING;

	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.rtc_0 = rule->rtc_0;
	ste_attr.rtc_1 = rule->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = &wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
	if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = attr->rule_idx;

	hws_rule_load_delete_info(rule, &ste_attr);
	mlx5hws_send_ste(queue, &ste_attr);
	hws_rule_clear_delete_info(rule);

	return 0;
}

static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
				     struct mlx5hws_rule_attr *attr)
{
	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;

	if (unlikely(!attr->user_data))
		return -EINVAL;

	/* Check if there is room in queue */
	if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
		return -EBUSY;

	return 0;
}

static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
					  struct mlx5hws_rule_attr *attr)
{
	if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
		return -EINVAL;

	return hws_rule_enqueue_precheck(rule, attr);
}

static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
					    struct mlx5hws_rule_attr *attr)
{
	if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
		/* Matcher in resize - new rules are not allowed */
		return -EAGAIN;

	return hws_rule_enqueue_precheck(rule, attr);
}

static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
					    struct mlx5hws_rule_attr *attr)
{
	struct mlx5hws_matcher *matcher = rule->matcher;

	if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
		     !matcher->attr.optimize_using_rule_idx &&
		     !mlx5hws_matcher_is_insert_by_idx(matcher))) {
		return -EOPNOTSUPP;
	}

	if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
		return -EBUSY;

	return hws_rule_enqueue_precheck_create(rule, attr);
}

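/* Remove the moved rule's match STE from the old matcher's RTCs by queueing
 * a DEACTIVATE WQE against the RTC IDs saved in resize_info.
 */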
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
				 void *queue_ptr,
				 void *user_data)
{
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_send_engine *queue = queue_ptr;
	struct mlx5hws_send_ste_attr ste_attr = {0};

	mlx5hws_send_all_dep_wqe(queue);

	rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;

	ste_attr.send_attr.fence = 0;
	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = 1;
	ste_attr.send_attr.user_data = user_data;
	ste_attr.rtc_0 = rule->resize_info->rtc_0;
	ste_attr.rtc_1 = rule->resize_info->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
	ste_attr.wqe_ctrl = &empty_wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;

	if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = rule->resize_info->rule_idx;

	hws_rule_load_delete_info(rule, &ste_attr);
	mlx5hws_send_ste(queue, &ste_attr);

	return 0;
}

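/* Write the moved rule into the destination matcher's RTCs by replaying the
 * saved match WQE control and data segments with an ACTIVATE WQE.
 */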
int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
			      struct mlx5hws_rule_attr *attr)
{
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_send_ste_attr ste_attr = {0};
	struct mlx5hws_send_engine *queue;
	int ret;

	ret = hws_rule_enqueue_precheck_move(rule, attr);
	if (unlikely(ret))
		return ret;

	queue = &ctx->send_queue[attr->queue_id];

	ret = mlx5hws_send_engine_err(queue);
	if (ret)
		return ret;

	hws_rule_move_init(rule, attr);
	hws_rule_move_get_rtc(rule, &ste_attr);

	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.fence = 0;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
	ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
	ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
				attr->rule_idx : 0;

	mlx5hws_send_ste(queue, &ste_attr);
	mlx5hws_send_engine_inc_rule(queue);

	if (!attr->burst)
		mlx5hws_send_all_dep_wqe(queue);

	return 0;
}

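/* Queue an asynchronous rule creation; the result is reported later as a
 * completion on the send queue identified by attr->queue_id.
 */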
int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
			u8 mt_idx,
			u32 *match_param,
			u8 at_idx,
			struct mlx5hws_rule_action rule_actions[],
			struct mlx5hws_rule_attr *attr,
			struct mlx5hws_rule *rule_handle)
{
	int ret;

	rule_handle->matcher = matcher;

	ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
	if (unlikely(ret))
		return ret;

	if (unlikely(matcher->num_of_mt < mt_idx ||
		     matcher->num_of_at < at_idx ||
		     !match_param)) {
		pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
		return -EINVAL;
	}

	ret = hws_rule_create_hws(rule_handle,
				  attr,
				  mt_idx,
				  match_param,
				  at_idx,
				  rule_actions);

	return ret;
}

int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
			 struct mlx5hws_rule_attr *attr)
{
	int ret;

	ret = hws_rule_enqueue_precheck(rule, attr);
	if (unlikely(ret))
		return ret;

	ret = hws_rule_destroy_hws(rule, attr);

	return ret;
}

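/* Update a rule's actions by re-running the create flow with a NULL
 * match_param: the saved match tag is reused while new action STEs are
 * written and the match STE is rewritten to point at them.
 */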
int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
			       u8 at_idx,
			       struct mlx5hws_rule_action rule_actions[],
			       struct mlx5hws_rule_attr *attr)
{
	int ret;

	ret = hws_rule_enqueue_precheck_update(rule, attr);
	if (unlikely(ret))
		return ret;

	ret = hws_rule_create_hws(rule,
				  attr,
				  0,
				  NULL,
				  at_idx,
				  rule_actions);

	return ret;
}