1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7 #include "core.h"
8 #include "peer.h"
9 #include "debug.h"
10
ath12k_peer_ml_find(struct ath12k_hw * ah,const u8 * addr)11 static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
12 {
13 struct ath12k_ml_peer *ml_peer;
14
15 lockdep_assert_wiphy(ah->hw->wiphy);
16
17 list_for_each_entry(ml_peer, &ah->ml_peers, list) {
18 if (!ether_addr_equal(ml_peer->addr, addr))
19 continue;
20
21 return ml_peer;
22 }
23
24 return NULL;
25 }
26
ath12k_peer_find(struct ath12k_base * ab,int vdev_id,const u8 * addr)27 struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
28 const u8 *addr)
29 {
30 struct ath12k_peer *peer;
31
32 lockdep_assert_held(&ab->base_lock);
33
34 list_for_each_entry(peer, &ab->peers, list) {
35 if (peer->vdev_id != vdev_id)
36 continue;
37 if (!ether_addr_equal(peer->addr, addr))
38 continue;
39
40 return peer;
41 }
42
43 return NULL;
44 }
45
/* Find a peer on pdev @pdev_idx with MAC address @addr.
 *
 * Caller must hold ab->base_lock.  Returns NULL when not found.
 */
static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
							u8 pdev_idx, const u8 *addr)
{
	struct ath12k_peer *cur;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(cur, &ab->peers, list) {
		if (cur->pdev_idx == pdev_idx &&
		    ether_addr_equal(cur->addr, addr))
			return cur;
	}

	return NULL;
}
64
ath12k_peer_find_by_addr(struct ath12k_base * ab,const u8 * addr)65 struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
66 const u8 *addr)
67 {
68 struct ath12k_peer *peer;
69
70 lockdep_assert_held(&ab->base_lock);
71
72 list_for_each_entry(peer, &ab->peers, list) {
73 if (!ether_addr_equal(peer->addr, addr))
74 continue;
75
76 return peer;
77 }
78
79 return NULL;
80 }
81
ath12k_peer_find_by_ml_id(struct ath12k_base * ab,int ml_peer_id)82 static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
83 int ml_peer_id)
84 {
85 struct ath12k_peer *peer;
86
87 lockdep_assert_held(&ab->base_lock);
88
89 list_for_each_entry(peer, &ab->peers, list)
90 if (ml_peer_id == peer->ml_id)
91 return peer;
92
93 return NULL;
94 }
95
ath12k_peer_find_by_id(struct ath12k_base * ab,int peer_id)96 struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
97 int peer_id)
98 {
99 struct ath12k_peer *peer;
100
101 lockdep_assert_held(&ab->base_lock);
102
103 if (peer_id & ATH12K_PEER_ML_ID_VALID)
104 return ath12k_peer_find_by_ml_id(ab, peer_id);
105
106 list_for_each_entry(peer, &ab->peers, list)
107 if (peer_id == peer->peer_id)
108 return peer;
109
110 return NULL;
111 }
112
ath12k_peer_exist_by_vdev_id(struct ath12k_base * ab,int vdev_id)113 bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
114 {
115 struct ath12k_peer *peer;
116
117 spin_lock_bh(&ab->base_lock);
118
119 list_for_each_entry(peer, &ab->peers, list) {
120 if (vdev_id == peer->vdev_id) {
121 spin_unlock_bh(&ab->base_lock);
122 return true;
123 }
124 }
125 spin_unlock_bh(&ab->base_lock);
126 return false;
127 }
128
ath12k_peer_find_by_ast(struct ath12k_base * ab,int ast_hash)129 struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
130 int ast_hash)
131 {
132 struct ath12k_peer *peer;
133
134 lockdep_assert_held(&ab->base_lock);
135
136 list_for_each_entry(peer, &ab->peers, list)
137 if (ast_hash == peer->ast_hash)
138 return peer;
139
140 return NULL;
141 }
142
/* Handle an HTT peer unmap event: remove and free the peer with
 * @peer_id and wake anyone waiting on the mapping state.
 *
 * Unknown ids are logged and otherwise ignored.
 */
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_id(ab, peer_id);
	if (peer) {
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
			   peer->vdev_id, peer->addr, peer_id);

		list_del(&peer->list);
		kfree(peer);
		/* waiters in ath12k_wait_for_peer_common() re-check the list */
		wake_up(&ab->peer_mapping_wq);
	} else {
		ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
	}

	spin_unlock_bh(&ab->base_lock);
}
166
/* Handle an HTT peer map event: create the peer entry for
 * (@vdev_id, @mac_addr) if it does not already exist and wake waiters.
 *
 * Allocation uses GFP_ATOMIC because ab->base_lock (a BH spinlock) is
 * held across it.
 */
void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, mac_addr);
	if (peer)
		goto out_log;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		goto out_unlock;

	peer->vdev_id = vdev_id;
	peer->peer_id = peer_id;
	peer->ast_hash = ast_hash;
	peer->hw_peer_id = hw_peer_id;
	ether_addr_copy(peer->addr, mac_addr);
	list_add(&peer->list, &ab->peers);
	wake_up(&ab->peer_mapping_wq);

out_log:
	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

out_unlock:
	spin_unlock_bh(&ab->base_lock);
}
194
/* Wait (up to 3 s) for the peer (@vdev_id, @addr) to reach the expected
 * mapping state: present when @expect_mapped is true, absent when false.
 * A crash flush (ATH12K_FLAG_CRASH_FLUSH) also terminates the wait so
 * callers are not stuck during recovery.
 *
 * Returns 0 on success, -ETIMEDOUT if the state was not reached in time.
 */
static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	/* The condition is a GCC statement expression re-evaluated on each
	 * wakeup of peer_mapping_wq (see peer map/unmap event handlers).
	 */
	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				/* ab->peers is protected by base_lock */
				spin_lock_bh(&ab->base_lock);
				mapped = !!ath12k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	/* wait_event_timeout() returns 0 on timeout, otherwise the
	 * remaining jiffies (>= 1)
	 */
	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}
216
/* Remove and free every peer still attached to vdev @vdev_id.
 *
 * Used to drop stale entries when a vdev goes away; each removal is
 * logged as a warning.  Requires the wiphy lock.
 */
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer, *next;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	spin_lock_bh(&ab->base_lock);

	list_for_each_entry_safe(peer, next, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}

	spin_unlock_bh(&ab->base_lock);
}
239
/* Wait until the link peer (@vdev_id, @addr) has been removed from the
 * local peer list (i.e. its unmap event has been processed).
 */
static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}
244
/* Wait for the deletion of link peer (@vdev_id, @addr) to fully complete:
 * first for the peer to disappear from the local list (unmap event), then
 * for the firmware's peer delete response completion.
 *
 * Returns 0 on success, a negative errno (-ETIMEDOUT) on failure.
 */
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
				     const u8 *addr)
{
	int ret;
	unsigned long time_left;

	ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret) {
		/* terminate the message with '\n' so it cannot be merged
		 * with a subsequent printk line
		 */
		ath12k_warn(ar->ab, "failed wait for peer deleted\n");
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->peer_delete_done,
						3 * HZ);
	if (time_left == 0) {
		ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
		return -ETIMEDOUT;
	}

	return 0;
}
266
/* Issue a WMI peer delete for (@vdev_id, @addr) without waiting for the
 * firmware response; the delete-done completion is re-armed first so a
 * later ath12k_wait_for_peer_delete_done() observes this command only.
 *
 * Requires the wiphy lock.  Returns 0 on success or a negative errno.
 */
static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	reinit_completion(&ar->peer_delete_done);

	ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (!ret)
		return 0;

	ath12k_warn(ab,
		    "failed to delete peer vdev_id %d addr %pM ret %d\n",
		    vdev_id, addr, ret);

	return ret;
}
286
/* Delete link peer (@vdev_id, @addr): send the WMI delete command and
 * wait for its completion, then account for the removed peer.
 *
 * Requires the wiphy lock.  Returns 0 on success or a negative errno.
 */
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
{
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ret = ath12k_peer_delete_send(ar, vdev_id, addr);
	if (!ret)
		ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
	if (ret)
		return ret;

	ar->num_peers--;

	return 0;
}
305
/* Wait until the link peer (@vdev_id, @addr) appears in the local peer
 * list (i.e. its map event has been processed).
 */
static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
310
/* Create a link peer in firmware and initialize the local peer entry.
 *
 * @ar: radio the peer lives on
 * @arvif: link vif the peer is associated with
 * @sta: station the peer represents, or NULL (e.g. self/bss peer)
 * @arg: WMI peer create parameters (vdev id, peer address, type)
 *
 * Sends the WMI create command, waits for the HTT map event, then fills
 * the local entry (pdev index, sta pointer, security defaults and, for
 * MLO stations, ML id/address/primary-link info).  If the peer does not
 * show up after creation, a best-effort delete is issued to keep the
 * firmware consistent.
 *
 * Requires the wiphy lock.  Returns 0 on success or a negative errno.
 */
int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
		       struct ieee80211_sta *sta,
		       struct ath12k_wmi_peer_create_arg *arg)
{
	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
	struct ath12k_link_sta *arsta;
	u8 link_id = arvif->link_id;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta;
	u16 ml_peer_id;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	/* refuse creation when the firmware peer table is full */
	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath12k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	/* reject a duplicate address on the same pdev */
	spin_lock_bh(&ar->ab->base_lock);
	peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
	if (peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    arg->vdev_id, ret);
		return ret;
	}

	/* the entry is added to ab->peers by the HTT peer map event */
	ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
					   arg->peer_addr);
	if (ret)
		return ret;

	spin_lock_bh(&ar->ab->base_lock);

	peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    arg->peer_addr, arg->vdev_id);

		/* roll back: delete the firmware peer we just created */
		reinit_completion(&ar->peer_delete_done);

		ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
						      arg->vdev_id);
		if (ret) {
			ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
				    arg->vdev_id, arg->peer_addr);
			return ret;
		}

		ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
						       arg->peer_addr);
		if (ret)
			return ret;

		return -ENOENT;
	}

	peer->pdev_idx = ar->pdev_idx;
	peer->sta = sta;

	if (vif->type == NL80211_IFTYPE_STATION) {
		/* cache AST info on the vif for station-mode TX/RX paths */
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}

	if (sta) {
		ahsta = ath12k_sta_to_ahsta(sta);
		arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
					  ahsta->link[link_id]);

		peer->link_id = arsta->link_id;

		/* Fill ML info into created peer */
		if (sta->mlo) {
			ml_peer_id = ahsta->ml_peer_id;
			peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
			ether_addr_copy(peer->ml_addr, sta->addr);

			/* the assoc link is considered primary for now */
			peer->primary_link = arsta->is_assoc_link;
			peer->mlo = true;
		} else {
			peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
			peer->primary_link = true;
			peer->mlo = false;
		}
	}

	/* security is upgraded later via key install; start open */
	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);

	return 0;
}
418
ath12k_peer_ml_alloc(struct ath12k_hw * ah)419 static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
420 {
421 u16 ml_peer_id;
422
423 lockdep_assert_wiphy(ah->hw->wiphy);
424
425 for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
426 if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
427 continue;
428
429 set_bit(ml_peer_id, ah->free_ml_peer_id_map);
430 break;
431 }
432
433 if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
434 ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
435
436 return ml_peer_id;
437 }
438
ath12k_peer_ml_create(struct ath12k_hw * ah,struct ieee80211_sta * sta)439 int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
440 {
441 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
442 struct ath12k_ml_peer *ml_peer;
443
444 lockdep_assert_wiphy(ah->hw->wiphy);
445
446 if (!sta->mlo)
447 return -EINVAL;
448
449 ml_peer = ath12k_peer_ml_find(ah, sta->addr);
450 if (ml_peer) {
451 ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
452 ml_peer->id, sta->addr);
453 return -EEXIST;
454 }
455
456 ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
457 if (!ml_peer)
458 return -ENOMEM;
459
460 ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
461
462 if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
463 ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
464 sta->addr);
465 kfree(ml_peer);
466 return -ENOMEM;
467 }
468
469 ether_addr_copy(ml_peer->addr, sta->addr);
470 ml_peer->id = ahsta->ml_peer_id;
471 list_add(&ml_peer->list, &ah->ml_peers);
472
473 return 0;
474 }
475
/* Delete the MLO peer entry for station @sta and release its ML peer id.
 *
 * Requires the wiphy lock.  Returns 0 on success, -EINVAL for non-MLO
 * stations or when no matching list entry exists.
 */
int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
{
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
	struct ath12k_ml_peer *ml_peer;

	lockdep_assert_wiphy(ah->hw->wiphy);

	if (!sta->mlo)
		return -EINVAL;

	/* NOTE(review): the id bit is released and ahsta->ml_peer_id
	 * invalidated before the list lookup, so they are cleared even
	 * when the entry is unexpectedly missing — presumably deliberate
	 * so the id is never leaked; confirm against callers.
	 */
	clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
	ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;

	ml_peer = ath12k_peer_ml_find(ah, sta->addr);
	if (!ml_peer) {
		ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
		return -EINVAL;
	}

	list_del(&ml_peer->list);
	kfree(ml_peer);

	return 0;
}
500
/* Delete all link peers belonging to the MLO station @ahsta on vif @ahvif.
 *
 * Runs in two passes over ahsta->links_map: first every link peer's
 * delete command is sent, then (separately) each deletion is waited on.
 * The split exists because firmware requires all link-peer deletes to be
 * issued before any unmap/delete responses are awaited (see comment
 * below).
 *
 * Requires the wiphy lock.  Returns 0 when every link peer was deleted,
 * otherwise the last error seen (remaining links are still processed).
 */
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
{
	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
	struct ath12k_hw *ah = ahvif->ah;
	struct ath12k_link_vif *arvif;
	struct ath12k_link_sta *arsta;
	unsigned long links;
	struct ath12k *ar;
	int ret, err_ret = 0;
	u8 link_id;

	lockdep_assert_wiphy(ah->hw->wiphy);

	if (!sta->mlo)
		return -EINVAL;

	/* FW expects delete of all link peers at once before waiting for reception
	 * of peer unmap or delete responses
	 */
	links = ahsta->links_map;
	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
		if (!arvif || !arsta)
			continue;

		ar = arvif->ar;
		if (!ar)
			continue;

		/* tear down the datapath state before the WMI delete */
		ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);

		ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to delete peer vdev_id %d addr %pM ret %d\n",
				    arvif->vdev_id, arsta->addr, ret);
			/* remember the failure but keep deleting the rest */
			err_ret = ret;
			continue;
		}
	}

	/* Ensure all link peers are deleted and unmapped */
	links = ahsta->links_map;
	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
		if (!arvif || !arsta)
			continue;

		ar = arvif->ar;
		if (!ar)
			continue;

		ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
		if (ret) {
			err_ret = ret;
			continue;
		}
		/* only decrement the count for confirmed deletions */
		ar->num_peers--;
	}

	return err_ret;
}
565