// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
#include "fs_core.h"
#include "eswitch.h"

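/* ASO WQE data offsets (data_offset_condition_operand values) used when
 * updating the hard and soft lifetime counters in the IPsec ASO context.
 */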
enum {
	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
	MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};

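/* Report which IPsec offload capabilities the device supports as a bitmask
 * of MLX5_IPSEC_CAP_* flags; returns 0 when IPsec offload cannot be used.
 */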
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	      MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
		caps |= MLX5_IPSEC_CAP_CRYPTO;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
	    (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
	     (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
	      is_mdev_legacy_mode(mdev)))) {
		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

		if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
		    ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
		      MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
		     MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
			caps |= MLX5_IPSEC_CAP_PRIO;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_l2_to_l3_esp_tunnel) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_l3_esp_tunnel_to_l2))
			caps |= MLX5_IPSEC_CAP_TUNNEL;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_transport_over_udp) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_transport_over_udp))
			caps |= MLX5_IPSEC_CAP_ESPINUDP;
	}

	if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
		caps |= MLX5_IPSEC_CAP_ROCE;

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	/* We can accommodate up to 2^24 different IPsec objects
	 * because we use up to 24 bits of the flow table metadata
	 * to hold the IPsec object's unique handle.
	 */
	WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

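/* Fill the IPsec object fields that are specific to packet offload: the
 * ASO context (ESN/replay protection arming, lifetime counters) and the
 * register used to return ASO results to flow steering.
 */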
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_esn.replay_window);
			MLX5_SET(ipsec_aso, aso_ctx, mode,
				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
		}
		MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
			 attrs->replay_esn.esn);
	}

	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that flow steering
	 * performs matching against. Be aware that this register was chosen
	 * arbitrarily and can't be used for anything else as long as IPsec
	 * packet offload is active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
		if (!attrs->replay_esn.trigger)
			MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
				 sa_entry->esn_state.esn);
	}

	if (attrs->lft.hard_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
			 attrs->lft.hard_packet_limit);
		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
			 attrs->lft.soft_packet_limit);

		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
	}
}

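/* Create the IPSEC general object in firmware for this SA (salt, implicit
 * IV, ESN state, DEK and, for packet offload, the ASO context) and store
 * the resulting object id in the SA entry.
 */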
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	struct mlx5e_hw_objs *res;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
	/* esn */
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
	}

	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	res = &mdev->mlx5e_res.hw_objs;
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

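/* Destroy the firmware IPSEC general object that backs this SA. */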
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

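/* Create the HW context of an SA: a DEK holding the AES-GCM key, followed
 * by the IPSEC object that references it. The DEK is destroyed if object
 * creation fails.
 */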
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
					 aes_gcm->key_len / BITS_PER_BYTE,
					 MLX5_ACCEL_OBJ_IPSEC_KEY,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
	return err;
}

void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

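/* Update the ESN MSB and overlap fields of an existing IPSEC object. The
 * object is queried first to verify that those fields are modifiable.
 */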
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

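/* Perform a bitwise 64-bit update of the IPsec ASO context, using the
 * offset, data and mask supplied in @data, by posting an ASO WQE through
 * mlx5e_ipsec_aso_query().
 */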
static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_wqe_aso_ctrl_seg *data)
{
	data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
				      MLX5_ASO_ALWAYS_TRUE << 4;

	mlx5e_ipsec_aso_query(sa_entry, data);
}

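/* Handle an ESN event: advance the driver's ESN state (MSB/overlap), push
 * it to the HW object and update the ASO context for the next event.
 */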
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
					 u32 mode_param)
{
	struct mlx5_accel_esp_xfrm_attrs attrs = {};
	struct mlx5_wqe_aso_ctrl_seg data = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn_msb++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);

	/* It is safe to execute the modify below unlocked since the only
	 * flows that could affect this HW object are create, destroy and
	 * this work.
	 *
	 * The creation flow can't co-exist with this modify work, the
	 * destruction flow cancels this work, and this work is a single
	 * entity that can't conflict with itself.
	 */
	spin_unlock_bh(&sa_entry->x->lock);
	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
	spin_lock_bh(&sa_entry->x->lock);

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_update(sa_entry, &data);
}

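/* Advance the ASO hard lifetime state to start the next round of packet
 * counting.
 */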
static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
	data.data_mask = data.bitwise_data;
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

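/* Set the soft lifetime counter in the ASO context to @val. */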
static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
					u32 val)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
	data.bitwise_data = cpu_to_be64(val);
	data.data_mask = cpu_to_be64(U32_MAX);
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

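/* Process a lifetime event reported through the ASO context: notify the
 * xfrm stack about soft/hard expiration and re-arm the counters for the
 * following rounds.
 */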
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	bool soft_arm, hard_arm;
	u64 hard_cnt;

	lockdep_assert_held(&sa_entry->x->lock);

	soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
	hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
	if (!soft_arm && !hard_arm)
		/* It is not a lifetime event */
		return;

	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
	if (!hard_cnt || hard_arm) {
		/* It is possible to see the packet counter equal to zero
		 * without the hard limit event armed. Such a situation can
		 * occur if the counter decreased while we were handling the
		 * soft limit event.
		 *
		 * However, it would be a HW/FW bug if the hard limit event
		 * is raised and the packet counter is not zero.
		 */
		WARN_ON_ONCE(hard_arm && hard_cnt);

		/* Notify about hard limit */
		xfrm_state_check_expire(sa_entry->x);
		return;
	}

	/* We are handling a soft limit event. */
	if (!sa_entry->limits.soft_limit_hit &&
	    sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
		sa_entry->limits.soft_limit_hit = true;
		/* Notify about soft limit */
		xfrm_state_check_expire(sa_entry->x);

		if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
			goto hard;

		if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
			/* We cannot avoid a soft_value that might have the
			 * high bit set. For instance soft_value=2^31+1 cannot
			 * be adjusted to the low bit clear version of
			 * soft_value=1 because it is too close to 0.
			 *
			 * Thus we have this corner case where we can hit the
			 * soft_limit with the high bit set, but cannot adjust
			 * the counter. Thus we set a temporary interrupt_value
			 * at least 2^30 away from here and do the adjustment
			 * then.
			 */
			mlx5e_ipsec_aso_update_soft(sa_entry,
						    BIT_ULL(31) - BIT_ULL(30));
			sa_entry->limits.fix_limit = true;
			return;
		}

		sa_entry->limits.fix_limit = true;
	}

hard:
	if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
		mlx5e_ipsec_aso_update_soft(sa_entry, 0);
		attrs->lft.soft_packet_limit = XFRM_INF;
		return;
	}

	mlx5e_ipsec_aso_update_hard(sa_entry);
	sa_entry->limits.round++;
	if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
		mlx5e_ipsec_aso_update_soft(sa_entry,
					    attrs->lft.soft_packet_limit);
	if (sa_entry->limits.fix_limit) {
		sa_entry->limits.fix_limit = false;
		mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
	}
}

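/* Work handler for an IPSEC object change event: read the ASO context back
 * from HW and handle pending ESN and/or lifetime events under the xfrm
 * state lock.
 */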
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_aso *aso;
	int ret;

	aso = sa_entry->ipsec->aso;
	attrs = &sa_entry->attrs;

	spin_lock_bh(&sa_entry->x->lock);
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->replay_esn.trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF)
		mlx5e_ipsec_handle_limits(sa_entry);

unlock:
	spin_unlock_bh(&sa_entry->x->lock);
	kfree(work);
}

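/* Notifier callback for MLX5_EVENT_TYPE_OBJECT_CHANGE events on IPSEC
 * objects: look up the SA by object id and defer the handling to the
 * IPsec workqueue, since the notifier runs in atomic context.
 */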
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	struct mlx5_eqe *eqe = data;
	u16 type;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return NOTIFY_DONE;

	sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
	if (!sa_entry)
		return NOTIFY_DONE;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->data = sa_entry;

	queue_work(ipsec->wq, &work->work);
	return NOTIFY_OK;
}

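/* Allocate the per-device IPsec ASO resources: a DMA-mapped buffer for
 * reading back the ASO context, the ASO instance used to post ASO WQEs,
 * and registration of the object change event notifier.
 */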
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_hw_objs *res;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	res = &mdev->mlx5e_res.hw_objs;

	pdev = mlx5_core_dma_dev(mdev);
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
				       DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, res->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = mlx5_core_dma_dev(mdev);

	mlx5_notifier_unregister(mdev, &ipsec->nb);
	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(aso);
	ipsec->aso = NULL;
}

static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

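/* Post an ASO WQE that reads the IPsec ASO context of this SA into the
 * DMA-mapped aso->ctx buffer, optionally applying the update described by
 * @data, and busy-poll the completion for up to 10ms. Must be called with
 * the xfrm state lock held.
 */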
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
			  struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	unsigned long expires;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l =
		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(res->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		ret = mlx5_aso_poll_cq(aso->aso, false);
		if (ret)
			/* We are in atomic context */
			udelay(10);
	} while (ret && time_is_after_jiffies(expires));
	spin_unlock_bh(&aso->lock);
	return ret;
}