// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
#include "fw.h"
#include "debugfs.h"
#include "wow.h"

unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");

/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);

static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret = 0, i;

	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
		return 0;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;

		ret = ath12k_mac_rfkill_config(ar);
		if (ret && ret != -EOPNOTSUPP) {
			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
			return ret;
		}
	}

	return ret;
}

/* Check if we need to continue with the suspend/resume operation.
 * Return:
 *	a negative value: an error occurred, do not continue.
 *	0: no error, but do not continue.
 *	a positive value: no error, do continue.
 */
static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
{
	struct ath12k *ar;

	if (!ab->hw_params->supports_suspend)
		return -EOPNOTSUPP;

	/* So far only single_pdev_only chips have supports_suspend set,
	 * so using pdev 0 as a dummy pdev_id here is safe.
	 */
	ar = ab->pdevs[0].ar;
	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
		return 0;

	return 1;
}

int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);

		ret = ath12k_mac_wait_tx_complete(ar);
		if (ret) {
			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}

		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
	}

	/* The PM framework skips the suspend_late/resume_early callbacks
	 * if another device reports an error in its suspend callback.
	 * However, ath12k_core_resume() is still called because we return
	 * success here, so the kernel keeps us on dpm_suspended_list.
	 * Since we will not go through a power down/up cycle in that case,
	 * there is no chance for ath12k_core_restart() to call
	 * complete(&ab->restart_completed), which would make
	 * ath12k_core_resume() time out. Call it here to avoid that issue.
	 * This also works in the normal case where suspend_late/resume_early
	 * do get called, because the completion is reinitialized in
	 * ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);

int ath12k_core_suspend_late(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	ath12k_acpi_stop(ab);

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, true);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend_late);

int ath12k_core_resume_early(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	reinit_completion(&ab->restart_completed);
	ret = ath12k_hif_power_up(ab);
	if (ret)
		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(ath12k_core_resume_early);

int ath12k_core_resume(struct ath12k_base *ab)
{
	long time_left;
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	time_left = wait_for_completion_timeout(&ab->restart_completed,
						ATH12K_RESET_TIMEOUT_HZ);
	if (time_left == 0) {
		ath12k_warn(ab, "timeout while waiting for restart complete");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(ath12k_core_resume);

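/* Compose the board name used to look up entries in board-2.bin. Depending
 * on the configured search mode it includes the bus name, the device and
 * subsystem IDs, the QMI chip and board ids, plus an optional ",variant="
 * suffix taken from SMBIOS or DT.
 */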
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  ab->qmi.target.board_id, variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}

static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, true, false);
}

static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, false);
}

static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, true);
}

const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
						    const char *file)
{
	const struct firmware *fw;
	char path[100];
	int ret;

	if (!file)
		return ERR_PTR(-ENOENT);

	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));

	ret = firmware_request_nowarn(&fw, path, ab->dev);
	if (ret)
		return ERR_PTR(ret);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
		   path, fw->size);

	return fw;
}

void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	if (!IS_ERR(bd->fw))
		release_firmware(bd->fw);

	memset(bd, 0, sizeof(*bd));
}

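/* Walk the IEs inside a BOARD/REGDB container: first look for a name IE that
 * matches boardname, then return the data IE that follows the match through
 * bd. Returns -ENOENT when the container holds no matching entry.
 */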
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}

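/* Parse board-2.bin: validate the file magic, then walk the top-level IEs
 * looking for the requested container (board or regdb) and hand it to
 * ath12k_core_parse_bd_ie_board() to find the entry for boardname.
 */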
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}

int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
				       struct ath12k_board_data *bd,
				       char *filename)
{
	bd->fw = ath12k_core_firmware_request(ab, filename);
	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	bd->data = bd->fw->data;
	bd->len = bd->fw->size;

	return 0;
}

#define BOARD_NAME_SIZE 200
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}

int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}

u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
{
	if (ab->num_radios == 2)
		return TARGET_NUM_STATIONS_DBS;
	else if (ab->num_radios == 3)
		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
	return TARGET_NUM_STATIONS_SINGLE;
}

u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
{
	if (ab->num_radios == 2)
		return TARGET_NUM_PEERS_PDEV_DBS;
	else if (ab->num_radios == 3)
		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
	return TARGET_NUM_PEERS_PDEV_SINGLE;
}

u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
{
	if (ab->num_radios == 2)
		return TARGET_NUM_TIDS(DBS);
	else if (ab->num_radios == 3)
		return TARGET_NUM_TIDS(DBS_SBS);
	return TARGET_NUM_TIDS(SINGLE);
}

static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_core_stopped(ab);

	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_free(ab);

	/* De-Init of components as needed */
}

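/* dmi_walk() callback: pull the board variant string out of the vendor
 * specific SMBIOS entry, after validating its type, length, magic prefix and
 * that the name contains only printable ASCII characters.
 */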
static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}

int ath12k_core_check_smbios(struct ath12k_base *ab)
{
	ab->qmi.target.bdf_ext[0] = '\0';
	dmi_walk(ath12k_core_check_bdfext, ab);

	if (ab->qmi.target.bdf_ext[0] == '\0')
		return -ENODATA;

	return 0;
}

static int ath12k_core_soc_create(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_qmi_init_service(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
		return ret;
	}

	ath12k_debugfs_soc_create(ab);

	ret = ath12k_hif_power_up(ab);
	if (ret) {
		ath12k_err(ab, "failed to power up :%d\n", ret);
		goto err_qmi_deinit;
	}

	return 0;

err_qmi_deinit:
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
	return ret;
}

static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}

static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_dp_pdev_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}

static int ath12k_core_start(struct ath12k_base *ab,
			     enum ath12k_firmware_mode mode)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(&ab->dp);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_dp_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ath12k_dp_hal_rx_desc_init(ab);

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_acpi_start(ab);
	if (ret)
		/* ACPI is optional so continue in case of an error */
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);

	if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
		/* Indicate the core start in the appropriate group */
		ath12k_core_started(ab);

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}

static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}

static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;
		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}

static int __ath12k_mac_mlo_ready(struct ath12k *ar)
{
	int ret;

	ret = ath12k_wmi_mlo_ready(ar);
	if (ret) {
		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
			   ar->pdev_idx, ret);
		return ret;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
		   ar->pdev_idx);

	return 0;
}

int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
{
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int ret = 0;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ag->ah[i];
		if (!ah)
			continue;

		for_each_ar(ah, ar, j) {
			ar = &ah->radio[j];
			ret = __ath12k_mac_mlo_ready(ar);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
{
	int ret, i;

	if (!ag->mlo_capable || ag->num_devices == 1)
		return 0;

	ret = ath12k_mac_mlo_setup(ag);
	if (ret)
		return ret;

	for (i = 0; i < ag->num_devices; i++)
		ath12k_dp_partner_cc_init(ag->ab[i]);

	ret = ath12k_mac_mlo_ready(ag);
	if (ret)
		goto err_mlo_teardown;

	return 0;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

	return ret;
}

static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}

static int ath12k_core_start_firmware(struct ath12k_base *ab,
				      enum ath12k_firmware_mode mode)
{
	int ret;

	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
				    &ab->qmi.ce_cfg.shadow_reg_v3_len);

	ret = ath12k_qmi_firmware_start(ab, mode);
	if (ret) {
		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
		return ret;
	}

	return ret;
}

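/* A hardware group is ready to start only once every device in the group has
 * gone through ath12k_core_start() and been counted in ag->num_started.
 */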
static inline
bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
{
	lockdep_assert_held(&ag->mutex);

	return (ag->num_started == ag->num_devices);
}

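/* Kick the QMI host-capability exchange on the partner devices that come
 * after this one in the group, so that their firmware bring-up can continue.
 */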
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_base *partner_ab;
	bool found = false;
	int i;

	for (i = 0; i < ag->num_devices; i++) {
		partner_ab = ag->ab[i];
		if (!partner_ab)
			continue;

		if (found)
			ath12k_qmi_trigger_host_cap(partner_ab);

		found = (partner_ab == ab);
	}
}

int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_dp_free;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_dp_free:
	ath12k_dp_free(ab);
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}

static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret;

	mutex_lock(&ab->core_lock);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_free(ab);
	ath12k_hal_srng_deinit(ab);

	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}

static void ath12k_rfkill_work(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	struct ieee80211_hw *hw;
	bool rfkill_radio_on;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	rfkill_radio_on = ab->rfkill_radio_on;
	spin_unlock_bh(&ab->base_lock);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah)
			continue;

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];
			if (!ar)
				continue;

			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
		}

		hw = ah->hw;
		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
	}
}
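/* Propagate the rfkill state reported by firmware to every radio in the
 * group and reflect it to userspace via wiphy_rfkill_set_hw_state().
 */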

void ath12k_core_halt(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ab->rfkill_work);

	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();
	INIT_LIST_HEAD(&ar->arvifs);
	idr_init(&ar->txmgmt_idr);
}

static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			complete(&ar->scan.started);
			complete(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);

			wake_up(&ar->dp.tx_empty_waitq);
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);
		}
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
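/* First half of recovery: flag the crash, drain pending tx and complete every
 * outstanding completion/waitqueue so nothing stays blocked on the dead
 * firmware while the restart is prepared.
 */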

static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			break;
		case ATH12K_HW_STATE_OFF:
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}

static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ab->ag, i);
			ieee80211_restart_hw(ah->hw);
		}
	}

	complete(&ab->restart_completed);
}

static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	int reset_count, fail_cont_count;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes a recovery attempt fails and every following attempt fails
	 * as well; bail out here to avoid an endless recovery loop that can
	 * never succeed.
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* A new reset worker can be scheduled before the previous one
		 * has completed; without the wait below the second worker
		 * would destroy state the first one is still using.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery failure count when recovery failed */
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);
	ath12k_hif_power_up(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
}

int ath12k_core_pre_init(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_hw_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init hw params: %d\n", ret);
		return ret;
	}

	ath12k_fw_map(ab);

	return 0;
}

static int ath12k_core_panic_handler(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
					      panic_nb);

	return ath12k_hif_panic_handler(ab);
}

static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
{
	ab->panic_nb.notifier_call = ath12k_core_panic_handler;

	return atomic_notifier_chain_register(&panic_notifier_list,
					      &ab->panic_nb);
}

static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &ab->panic_nb);
}

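/* Group-level SoC creation can begin only after every device expected in the
 * group has been probed and assigned to it.
 */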
static inline
bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
{
	lockdep_assert_held(&ag->mutex);

	return (ag->num_probed == ag->num_devices);
}

static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int count = 0;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	list_for_each_entry(ag, &ath12k_hw_group_list, list)
		count++;

	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
	if (!ag)
		return NULL;

	ag->id = count;
	list_add(&ag->list, &ath12k_hw_group_list);
	mutex_init(&ag->mutex);
	ag->mlo_capable = false;

	return ag;
}

static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}

static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int i;

	if (!ab->dev->of_node)
		return NULL;

	list_for_each_entry(ag, &ath12k_hw_group_list, list)
		for (i = 0; i < ag->num_devices; i++)
			if (ag->wsi_node[i] == ab->dev->of_node)
				return ag;

	return NULL;
}

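/* Walk the WSI ring described in the device tree: starting from this device's
 * node, follow each tx endpoint to the remote rx endpoint of the next device
 * until the walk wraps back to the start, recording every node and counting
 * how many devices belong to the group.
 */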
static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
				    struct ath12k_base *ab)
{
	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
	struct device_node *tx_endpoint, *next_rx_endpoint;
	int device_count = 0;

	next_wsi_dev = wsi_dev;

	if (!next_wsi_dev)
		return -ENODEV;

	do {
		ag->wsi_node[device_count] = next_wsi_dev;

		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
		if (!tx_endpoint) {
			of_node_put(next_wsi_dev);
			return -ENODEV;
		}

		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
		if (!next_rx_endpoint) {
			of_node_put(next_wsi_dev);
			of_node_put(tx_endpoint);
			return -ENODEV;
		}

		of_node_put(tx_endpoint);
		of_node_put(next_wsi_dev);

		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
		if (!next_wsi_dev) {
			of_node_put(next_rx_endpoint);
			return -ENODEV;
		}

		of_node_put(next_rx_endpoint);

		device_count++;
		if (device_count > ATH12K_MAX_SOCS) {
			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
				    device_count, ATH12K_MAX_SOCS);
			of_node_put(next_wsi_dev);
			return -EINVAL;
		}
	} while (wsi_dev != next_wsi_dev);

	of_node_put(next_wsi_dev);
	ag->num_devices = device_count;

	return 0;
}

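/* Compute this device's position in the WSI ring relative to the node marked
 * "qcom,wsi-controller": the controller gets index 0 and the other devices
 * follow in ring order.
 */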
static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
				     struct ath12k_base *ab)
{
	int i, wsi_controller_index = -1, node_index = -1;
	bool control;

	for (i = 0; i < ag->num_devices; i++) {
		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
		if (control)
			wsi_controller_index = i;

		if (ag->wsi_node[i] == ab->dev->of_node)
			node_index = i;
	}

	if (wsi_controller_index == -1) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
		return -EINVAL;
	}

	if (node_index == -1) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
		return -EINVAL;
	}

	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
		ag->num_devices;

	return 0;
}

static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	/* Grouping of multiple devices is based on the device tree. Platforms
	 * without any valid group information end up with each device placed
	 * in its own "invalid" group.
	 *
	 * Group id ATH12K_INVALID_GROUP_ID is used for such single device
	 * groups that have no DT entry or a wrong DT entry, so there can be
	 * many groups sharing the ATH12K_INVALID_GROUP_ID id. The default
	 * group id of ATH12K_INVALID_GROUP_ID combined with the number of
	 * devices in the ath12k_hw_group determines whether the group is a
	 * multi device or a single device group.
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}

void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}

static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (WARN_ON(!ag))
		return;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_core_soc_destroy(ab);
	}
}

static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}

static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i, ret;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		ret = ath12k_core_soc_create(ab);
		if (ret) {
			mutex_unlock(&ab->core_lock);
			ath12k_err(ab, "failed to create soc core: %d\n", ret);
			return ret;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;
}

void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	/* If more than one device is grouped, inter-device MLO can work
	 * regardless of whether each device internally supports
	 * single_chip_mlo. Only with a single device does MLO capability
	 * depend on whether that device supports intra-chip MLO.
	 */
	if (ag->num_devices > 1) {
		ag->mlo_capable = true;
	} else {
		ab = ag->ab[0];
		ag->mlo_capable = ab->single_chip_mlo_supp;

		/* WCN chipsets do not advertise MLO support in their firmware
		 * features, hence skip the check below for them.
		 */
		if (ab->hw_params->def_num_link)
			return;
	}

	if (!ag->mlo_capable)
		return;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		/* even if one device's firmware features indicate that MLO is
		 * unsupported, mark MLO as unsupported for the whole group
		 */
		if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
			ag->mlo_capable = false;
			return;
		}
	}
}

int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		return -ENODEV;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err:
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	return ret;
}

void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_panic_notifier_unregister(ab);
	ath12k_core_hw_group_cleanup(ab->ag);
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
}

void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}

struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	INIT_LIST_HEAD(&ab->peers);
	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_supp = false;

	/* Device index used to identify the devices within a group.
	 *
	 * With intra-device MLO only one device is present in a group, so the
	 * index is always zero.
	 *
	 * With inter-device MLO multiple devices are present in a group, so
	 * non-zero values are expected as well.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}

MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");