// SPDX-License-Identifier: GPL-2.0
/* Marvell MACSEC hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <crypto/skcipher.h>
#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* epon */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

#define CN10K_MAX_HASH_LEN		16
#define CN10K_MAX_SAK_LEN		32

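/* Derive the AES-GCM hash key by encrypting an all-zero block with the
 * SAK using ECB(AES); this value (the GHASH subkey H) is programmed
 * into the SA policy alongside the key in cn10k_mcs_write_keys().
 */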
static int cn10k_ecb_aes_encrypt(struct otx2_nic *pfvf, u8 *sak,
				 u16 sak_len, u8 *hash)
{
	u8 data[CN10K_MAX_HASH_LEN] = { 0 };
	struct skcipher_request *req = NULL;
	struct scatterlist sg_src, sg_dst;
	struct crypto_skcipher *tfm;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		dev_err(pfvf->dev, "failed to allocate transform for ecb-aes\n");
		return PTR_ERR(tfm);
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		dev_err(pfvf->dev, "failed to allocate request for skcipher\n");
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, sak, sak_len);
	if (err) {
		dev_err(pfvf->dev, "failed to set key for skcipher\n");
		goto free_req;
	}

	/* build sg list */
	sg_init_one(&sg_src, data, CN10K_MAX_HASH_LEN);
	sg_init_one(&sg_dst, hash, CN10K_MAX_HASH_LEN);

	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg_src, &sg_dst,
				   CN10K_MAX_HASH_LEN, NULL);

	err = crypto_skcipher_encrypt(req);
	err = crypto_wait_req(err, &wait);

free_req:
	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

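/* Look up the driver-private TX/RX SC state matching the software
 * SecY (and RX SC) objects owned by the MACsec core.
 */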
static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}
}

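/* Ask the AF, over the mailbox, for one hardware resource (flow ID,
 * SC, SecY or SA index) of the given type and direction; the index
 * granted by the AF is returned through @rsrc_id.
 */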
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt  = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

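/* Return a hardware resource to the AF. Its statistics are cleared in
 * the same mailbox batch, so the next user of the index starts from
 * zeroed counters.
 */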
static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req)
		goto fail;

	clear_req->id = hw_rsrc_id;
	clear_req->type = type;
	clear_req->dir = dir;

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

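/* Build and write the RX SecY policy word: replay window/protection,
 * authentication enable, cipher suite selected from key length and
 * XPN, frame validation mode, and the enable bit.
 */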
static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	u8 cipher;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, cipher);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

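/* Program the RX TCAM flow entry for this SC: match the SecY netdev's
 * MAC address as destination and the MACsec ethertype, leaving every
 * other TCAM word fully masked.
 */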
static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct macsec_secy *secy = rxsc->sw_secy;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_da;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

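/* Lay out SAK, hash key, salt and SSCI in the SA policy the way the
 * hardware expects: each byte array is reversed, and the 32-bit SSCI
 * is converted to big endian and placed in the upper half of the
 * eighth 64-bit policy word.
 */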
static int cn10k_mcs_write_keys(struct otx2_nic *pfvf,
				struct macsec_secy *secy,
				struct mcs_sa_plcy_write_req *req,
				u8 *sak, u8 *salt, ssci_t ssci)
{
	u8 hash_rev[CN10K_MAX_HASH_LEN];
	u8 sak_rev[CN10K_MAX_SAK_LEN];
	u8 salt_rev[MACSEC_SALT_LEN];
	u8 hash[CN10K_MAX_HASH_LEN];
	u32 ssci_63_32;
	int err, i;

	err = cn10k_ecb_aes_encrypt(pfvf, sak, secy->key_len, hash);
	if (err) {
		dev_err(pfvf->dev, "Generating hash using ECB(AES) failed\n");
		return err;
	}

	for (i = 0; i < secy->key_len; i++)
		sak_rev[i] = sak[secy->key_len - 1 - i];

	for (i = 0; i < CN10K_MAX_HASH_LEN; i++)
		hash_rev[i] = hash[CN10K_MAX_HASH_LEN - 1 - i];

	for (i = 0; i < MACSEC_SALT_LEN; i++)
		salt_rev[i] = salt[MACSEC_SALT_LEN - 1 - i];

	ssci_63_32 = (__force u32)cpu_to_be32((__force u32)ssci);

	memcpy(&req->plcy[0][0], sak_rev, secy->key_len);
	memcpy(&req->plcy[0][4], hash_rev, CN10K_MAX_HASH_LEN);
	memcpy(&req->plcy[0][6], salt_rev, MACSEC_SALT_LEN);
	req->plcy[0][7] |= (u64)ssci_63_32 << 32;

	return 0;
}

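/* Write an RX SA's key material and its SC/AN mapping. The policy
 * write and the SA map requests are queued together and sent as one
 * mailbox batch.
 */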
static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = rxsc->sa_key[assoc_num];
	u8 *salt = rxsc->salt[assoc_num];
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, rxsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

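/* Build the TX SecY policy word: MTU, SecTag TCI bits (excluding the
 * two AN bits), SecTag insertion offset and mode, cipher suite, and
 * the protect/enable bits.
 */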
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	u8 sectag_tci = 0;
	u8 tag_offset;
	u64 policy;
	u8 cipher;
	int ret;

	/* Insert SecTag after 12 bytes (DA+SA) or 16 bytes
	 * if VLAN tag needs to be sent in clear text.
	 */
	tag_offset = txsc->vlan_dev ? 16 : 12;
	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
	/* Write SecTag excluding AN bits(1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, cipher);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is not active, and protect
	 * is not set, then frames can be sent out as-is. Hence enable the
	 * policy irrespective of secy operational state when !protect.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

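/* Program the TX TCAM flow entry: match the SecY netdev's MAC address
 * as source (split across two TCAM words) with everything else masked,
 * and bind the flow to this SC, SecY and SCI.
 */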
static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since stack xmits packets only when interface is up */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Link the encoding_sa only to SC out of all SAs */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = txsc->sa_key[assoc_num];
	u8 *salt = txsc->salt[assoc_num];
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, txsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

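/* Read SA statistics from the AF; when @clear is set, a clear-stats
 * request is queued in the same mailbox batch so the counters are
 * reset immediately after being read. The SC and SecY stats helpers
 * below follow the same pattern.
 */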
static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

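/* Allocate the hardware resources backing a SecY's TX side: a TCAM
 * flow ID, a TX SecY, the paired RX SecY and a TX SC. On failure, all
 * resources allocated so far are returned to the AF.
 */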
static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For a SecY, one TX secy and one RX secy HW resources are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free Tx SC and its SAs (if any) resources to AF
 */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free Rx SC and its SAs (if any) resources to AF
 */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

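/* Push a SecY's TX-side configuration to hardware: the encoding SA
 * (keys, next PN, SC mapping) when one is supplied, then the TX SecY
 * policy, the TX flow entry and the paired RX SecY policy.
 */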
static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating secy, change RX secy also */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

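/* Reprogram the RX side of a SecY: for every active RX SC, rewrite
 * each known SA's keys and next PN, then the RX flow entry and the
 * SC CAM match.
 */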
static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Because of shared counters for some stats in the hardware, when
	 * updating the secy policy take a snapshot of the current stats and
	 * reset them. Below are the stats affected by the shared counters.
	 */

	/* Check if sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->replay_protect == txsc->last_replay_protect)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_replay_protect)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
}

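/* The mdo_* handlers below implement struct macsec_ops; the MACsec
 * core calls them to mirror its software state into the MCS hardware.
 */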
static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
	txsc->vlan_dev = is_vlan_dev(ctx->netdev);

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	bool active;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	/* Encoding SA got changed */
	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
		txsc->encoding_sa = secy->tx_sc.encoding_sa;
		sa_num = txsc->encoding_sa;
		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
		active = sw_tx_sa ? sw_tx_sa->active : false;
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
	}

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	memcpy(&txsc->salt[sa_num], sw_tx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	txsc->ssci[sa_num] = sw_tx_sa->ssci;

	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		if (ctx->sa.update_pn) {
			err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
						   sw_tx_sa->next_pn);
			if (err)
				return err;
		}

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	memcpy(&rxsc->salt[sa_num], rx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	rxsc->ssci[sa_num] = rx_sa->ssci;

	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		if (!ctx->sa.update_pn)
			return 0;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->replay_protect)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

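/* Handle the TX XPN-wraparound interrupt raised by the MCS block: find
 * the SecY and software SA that own the expired hardware SA and report
 * it to the MACsec core via macsec_pn_wrapped().
 */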
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

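/* Enable MACsec offload support: allocate the driver-private config,
 * advertise NETIF_F_HW_MACSEC and the macsec_ops, and request the TX
 * XPN-wraparound (PN wrap) interrupt from the AF. Failure to set up
 * the interrupt is logged but treated as non-fatal.
 */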
int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}