// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2023 Realtek Corporation
 */

#include "debug.h"
#include "efuse.h"
#include "mac.h"
#include "reg.h"

#define EFUSE_EXTERNALPN_ADDR_BE 0x1580
#define EFUSE_SERIALNUM_ADDR_BE 0x1581
#define EFUSE_SB_CRYP_SEL_ADDR 0x1582
#define EFUSE_SB_CRYP_SEL_SIZE 2
#define EFUSE_SB_CRYP_SEL_DEFAULT 0xFFFF
#define SB_SEL_MGN_MAX_SIZE 2
#define EFUSE_SEC_BE_START 0x1580
#define EFUSE_SEC_BE_SIZE 4

static const u32 sb_sel_mgn[SB_SEL_MGN_MAX_SIZE] = {
	0x8000100, 0xC000180
};

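/*
 * Prepare the digital die (DDV) eFuse for direct reads: set the power-cut
 * override bit and, except on RTL8922A CAV silicon, walk the ISO_CTRL
 * power/isolation bits (apparently releasing eFuse-to-core isolation)
 * before enabling burst access.
 */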
static void rtw89_enable_efuse_pwr_cut_ddv_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool aphy_patch = true;

	if (chip->chip_id == RTL8922A && hal->cv == CHIP_CAV)
		aphy_patch = false;

	rtw89_write8_set(rtwdev, R_BE_PMC_DBG_CTRL2, B_BE_SYSON_DIS_PMCR_BE_WRMSK);

	if (aphy_patch) {
		rtw89_write16_set(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_S);
		mdelay(1);
		rtw89_write16_set(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_B);
		rtw89_write16_clr(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_ISO_EB2CORE);
	}

	rtw89_write32_set(rtwdev, R_BE_EFUSE_CTRL_2_V1, B_BE_EF_BURST);
}

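/*
 * Counterpart of rtw89_enable_efuse_pwr_cut_ddv_be(): restore the ISO_CTRL
 * bits in reverse order (again skipped on RTL8922A CAV), then clear the
 * power-cut override and the burst-read enable.
 */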
static void rtw89_disable_efuse_pwr_cut_ddv_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool aphy_patch = true;

	if (chip->chip_id == RTL8922A && hal->cv == CHIP_CAV)
		aphy_patch = false;

	if (aphy_patch) {
		rtw89_write16_set(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_ISO_EB2CORE);
		rtw89_write16_clr(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_B);
		mdelay(1);
		rtw89_write16_clr(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_S);
	}

	rtw89_write8_clr(rtwdev, R_BE_PMC_DBG_CTRL2, B_BE_SYSON_DIS_PMCR_BE_WRMSK);
	rtw89_write32_clr(rtwdev, R_BE_EFUSE_CTRL_2_V1, B_BE_EF_BURST);
}

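/*
 * Read @dump_size bytes of the digital-die physical eFuse starting at
 * @dump_addr into @map, one 32-bit word at a time: write the address to
 * R_BE_EFUSE_CTRL, poll B_BE_EF_RDY, then fetch the data word from
 * R_BE_EFUSE_CTRL_1_V1. Address and size must both be 4-byte aligned.
 */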
static int rtw89_dump_physical_efuse_map_ddv_be(struct rtw89_dev *rtwdev, u8 *map,
						u32 dump_addr, u32 dump_size)
{
	u32 efuse_ctl;
	u32 addr;
	u32 data;
	int ret;

	if (!IS_ALIGNED(dump_addr, 4) || !IS_ALIGNED(dump_size, 4)) {
		rtw89_err(rtwdev, "Efuse addr 0x%x or size 0x%x not aligned\n",
			  dump_addr, dump_size);
		return -EINVAL;
	}

	rtw89_enable_efuse_pwr_cut_ddv_be(rtwdev);

	for (addr = dump_addr; addr < dump_addr + dump_size; addr += 4, map += 4) {
		efuse_ctl = u32_encode_bits(addr, B_BE_EF_ADDR_MASK);
		rtw89_write32(rtwdev, R_BE_EFUSE_CTRL, efuse_ctl & ~B_BE_EF_RDY);

		ret = read_poll_timeout_atomic(rtw89_read32, efuse_ctl,
					       efuse_ctl & B_BE_EF_RDY, 1, 1000000,
					       true, rtwdev, R_BE_EFUSE_CTRL);
		if (ret)
			return -EBUSY;

		data = rtw89_read32(rtwdev, R_BE_EFUSE_CTRL_1_V1);
		*((__le32 *)map) = cpu_to_le32(data);
	}

	rtw89_disable_efuse_pwr_cut_ddv_be(rtwdev);

	return 0;
}

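/*
 * Read the analog-die (DAV) physical eFuse byte by byte through the XTAL_SI
 * indirect interface: program the control/low/high address fields, kick off
 * the access, poll XTAL_SI_RDY, then pick up the byte from XTAL_SI_READ_VAL.
 */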
static int rtw89_dump_physical_efuse_map_dav_be(struct rtw89_dev *rtwdev, u8 *map,
						u32 dump_addr, u32 dump_size)
{
	u32 addr;
	u8 val8;
	int err;
	int ret;

	for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
		ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0x40,
					      FULL_BIT_MASK);
		if (ret)
			return ret;
		ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_LOW_ADDR, addr & 0xff,
					      XTAL_SI_LOW_ADDR_MASK);
		if (ret)
			return ret;
		ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, addr >> 8,
					      XTAL_SI_HIGH_ADDR_MASK);
		if (ret)
			return ret;
		ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0,
					      XTAL_SI_MODE_SEL_MASK);
		if (ret)
			return ret;

		ret = read_poll_timeout_atomic(rtw89_mac_read_xtal_si, err,
					       !err && (val8 & XTAL_SI_RDY),
					       1, 10000, false,
					       rtwdev, XTAL_SI_CTRL, &val8);
		if (ret) {
			rtw89_warn(rtwdev, "failed to read dav efuse\n");
			return ret;
		}

		ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_READ_VAL, &val8);
		if (ret)
			return ret;
		*map++ = val8;
	}

	return 0;
}

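/*
 * Bracket physical eFuse dumps: entering the access window (idle == false)
 * clears B_BE_BT_DISN_EN and waits for the whole-system power state to
 * reach MAC_AX_SYS_ACT; leaving it (idle == true) sets B_BE_BT_DISN_EN
 * again.
 */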
int rtw89_cnv_efuse_state_be(struct rtw89_dev *rtwdev, bool idle)
{
	u32 val;
	int ret = 0;

	if (idle) {
		rtw89_write32_set(rtwdev, R_BE_WL_BT_PWR_CTRL, B_BE_BT_DISN_EN);
	} else {
		rtw89_write32_clr(rtwdev, R_BE_WL_BT_PWR_CTRL, B_BE_BT_DISN_EN);

		ret = read_poll_timeout(rtw89_read32_mask, val,
					val == MAC_AX_SYS_ACT, 50, 5000,
					false, rtwdev, R_BE_IC_PWR_STATE,
					B_BE_WHOLE_SYS_PWR_STE_MASK);
		if (ret)
			rtw89_warn(rtwdev, "failed to convert efuse state\n");
	}

	return ret;
}

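/*
 * Dump a region of the physical eFuse into @map, bracketing the access with
 * rtw89_cnv_efuse_state_be(). @dav selects the analog-die path, otherwise
 * the digital-die path is used; a NULL @map or zero @dump_size is a no-op.
 */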
static int rtw89_dump_physical_efuse_map_be(struct rtw89_dev *rtwdev, u8 *map,
					    u32 dump_addr, u32 dump_size, bool dav)
{
	int ret;

	if (!map || dump_size == 0)
		return 0;

	rtw89_cnv_efuse_state_be(rtwdev, false);

	if (dav) {
		ret = rtw89_dump_physical_efuse_map_dav_be(rtwdev, map,
							   dump_addr, dump_size);
		if (ret)
			return ret;

		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "phy_map dav: ", map, dump_size);
	} else {
		ret = rtw89_dump_physical_efuse_map_ddv_be(rtwdev, map,
							   dump_addr, dump_size);
		if (ret)
			return ret;

		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "phy_map ddv: ", map, dump_size);
	}

	rtw89_cnv_efuse_state_be(rtwdev, true);

	return 0;
}

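/*
 * Layout of a logical-map entry in the physical eFuse: a 3-byte header on
 * the digital die (const/page/offset/word-enable fields below) or a 2-byte
 * header on the analog die, followed by one 16-bit data word for every
 * cleared word-enable bit.
 */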
#define EFUSE_HDR_CONST_MASK GENMASK(23, 20)
#define EFUSE_HDR_PAGE_MASK GENMASK(19, 17)
#define EFUSE_HDR_OFFSET_MASK GENMASK(16, 4)
#define EFUSE_HDR_OFFSET_DAV_MASK GENMASK(11, 4)
#define EFUSE_HDR_WORD_EN_MASK GENMASK(3, 0)

#define invalid_efuse_header_be(hdr1, hdr2, hdr3) \
	((hdr1) == 0xff || (hdr2) == 0xff || (hdr3) == 0xff)
#define invalid_efuse_content_be(word_en, i) \
	(((word_en) & BIT(i)) != 0x0)
#define get_efuse_blk_idx_be(hdr1, hdr2, hdr3) \
	(((hdr1) << 16) | ((hdr2) << 8) | (hdr3))
#define block_idx_to_logical_idx_be(blk_idx, i) \
	(((blk_idx) << 3) + ((i) << 1))

#define invalid_efuse_header_dav_be(hdr1, hdr2) \
	((hdr1) == 0xff || (hdr2) == 0xff)
#define get_efuse_blk_idx_dav_be(hdr1, hdr2) \
	(((hdr1) << 8) | (hdr2))

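/*
 * Rebuild the logical block described by @efuse_block from the physical map
 * @phy_map into @log_map (pre-filled with 0xff). Parsing starts after the
 * secure-control area (chip->sec_ctrl_efuse_size) and stops at the first
 * all-0xff header; entries on other pages or outside the requested window
 * are skipped, and edge bytes are dropped when the window is not 2-byte
 * aligned.
 */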
static int rtw89_eeprom_parser_be(struct rtw89_dev *rtwdev,
				  const u8 *phy_map, u32 phy_size, u8 *log_map,
				  const struct rtw89_efuse_block_cfg *efuse_block)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_efuse_block blk_page, page;
	u32 size = efuse_block->size;
	u32 phy_idx, log_idx;
	u32 hdr, page_offset;
	u8 hdr1, hdr2, hdr3;
	u8 i, val0, val1;
	u32 min, max;
	u16 blk_idx;
	u8 word_en;

	page = u32_get_bits(efuse_block->offset, RTW89_EFUSE_BLOCK_ID_MASK);
	page_offset = u32_get_bits(efuse_block->offset, RTW89_EFUSE_BLOCK_SIZE_MASK);

	min = ALIGN_DOWN(page_offset, 2);
	max = ALIGN(page_offset + size, 2);

	memset(log_map, 0xff, size);

	phy_idx = chip->sec_ctrl_efuse_size;

	do {
		if (page == RTW89_EFUSE_BLOCK_ADIE) {
			hdr1 = phy_map[phy_idx];
			hdr2 = phy_map[phy_idx + 1];
			if (invalid_efuse_header_dav_be(hdr1, hdr2))
				break;

			phy_idx += 2;

			hdr = get_efuse_blk_idx_dav_be(hdr1, hdr2);

			blk_page = RTW89_EFUSE_BLOCK_ADIE;
			blk_idx = u32_get_bits(hdr, EFUSE_HDR_OFFSET_DAV_MASK);
			word_en = u32_get_bits(hdr, EFUSE_HDR_WORD_EN_MASK);
		} else {
			hdr1 = phy_map[phy_idx];
			hdr2 = phy_map[phy_idx + 1];
			hdr3 = phy_map[phy_idx + 2];
			if (invalid_efuse_header_be(hdr1, hdr2, hdr3))
				break;

			phy_idx += 3;

			hdr = get_efuse_blk_idx_be(hdr1, hdr2, hdr3);

			blk_page = u32_get_bits(hdr, EFUSE_HDR_PAGE_MASK);
			blk_idx = u32_get_bits(hdr, EFUSE_HDR_OFFSET_MASK);
			word_en = u32_get_bits(hdr, EFUSE_HDR_WORD_EN_MASK);
		}

		if (blk_idx >= RTW89_EFUSE_MAX_BLOCK_SIZE >> 3) {
			rtw89_err(rtwdev, "[ERR]efuse idx:0x%X\n", phy_idx - 3);
			rtw89_err(rtwdev, "[ERR]read hdr:0x%X\n", hdr);
			return -EINVAL;
		}

		for (i = 0; i < 4; i++) {
			if (invalid_efuse_content_be(word_en, i))
				continue;

			if (phy_idx >= phy_size - 1)
				return -EINVAL;

			log_idx = block_idx_to_logical_idx_be(blk_idx, i);

			if (blk_page == page && log_idx >= min && log_idx < max) {
				val0 = phy_map[phy_idx];
				val1 = phy_map[phy_idx + 1];

				if (log_idx == min && page_offset > min) {
					log_map[log_idx - page_offset + 1] = val1;
				} else if (log_idx + 2 == max &&
					   page_offset + size < max) {
					log_map[log_idx - page_offset] = val0;
				} else {
					log_map[log_idx - page_offset] = val0;
					log_map[log_idx - page_offset + 1] = val1;
				}
			}
			phy_idx += 2;
		}
	} while (phy_idx < phy_size);

	return 0;
}

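/*
 * Extract one logical eFuse block from the physical map and feed it to the
 * chip's ->read_efuse() handler.
 */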
static int rtw89_parse_logical_efuse_block_be(struct rtw89_dev *rtwdev,
					      const u8 *phy_map, u32 phy_size,
					      enum rtw89_efuse_block block)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_efuse_block_cfg *efuse_block;
	u8 *log_map;
	int ret;

	efuse_block = &chip->efuse_blocks[block];

	log_map = kmalloc(efuse_block->size, GFP_KERNEL);
	if (!log_map)
		return -ENOMEM;

	ret = rtw89_eeprom_parser_be(rtwdev, phy_map, phy_size, log_map, efuse_block);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump efuse logical block %d\n", block);
		goto out_free;
	}

	rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, efuse_block->size);

	ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map, block);
	if (ret) {
		rtw89_warn(rtwdev, "failed to read efuse map\n");
		goto out_free;
	}

out_free:
	kfree(log_map);

	return ret;
}

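/*
 * Top-level eFuse parsing for BE-generation chips: check the autoload
 * status, dump the digital-die (and, if present, analog-die) physical maps,
 * then parse the HCI block matching the bus type and the RF block.
 */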
int rtw89_parse_efuse_map_be(struct rtw89_dev *rtwdev)
{
	u32 phy_size = rtwdev->chip->physical_efuse_size;
	u32 dav_phy_size = rtwdev->chip->dav_phy_efuse_size;
	enum rtw89_efuse_block block;
	u8 *phy_map = NULL;
	u8 *dav_phy_map = NULL;
	int ret;

	if (rtw89_read16(rtwdev, R_BE_SYS_WL_EFUSE_CTRL) & B_BE_AUTOLOAD_SUS)
		rtwdev->efuse.valid = true;
	else
		rtw89_warn(rtwdev, "failed to check efuse autoload\n");

	phy_map = kmalloc(phy_size, GFP_KERNEL);
	if (dav_phy_size)
		dav_phy_map = kmalloc(dav_phy_size, GFP_KERNEL);

	if (!phy_map || (dav_phy_size && !dav_phy_map)) {
		ret = -ENOMEM;
		goto out_free;
	}

	ret = rtw89_dump_physical_efuse_map_be(rtwdev, phy_map, 0, phy_size, false);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump efuse physical map\n");
		goto out_free;
	}
	ret = rtw89_dump_physical_efuse_map_be(rtwdev, dav_phy_map, 0, dav_phy_size, true);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump efuse dav physical map\n");
		goto out_free;
	}

	if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
		block = RTW89_EFUSE_BLOCK_HCI_DIG_USB;
	else
		block = RTW89_EFUSE_BLOCK_HCI_DIG_PCIE_SDIO;

	ret = rtw89_parse_logical_efuse_block_be(rtwdev, phy_map, phy_size, block);
	if (ret) {
		rtw89_warn(rtwdev, "failed to parse efuse logic block %d\n", block);
		goto out_free;
	}

	ret = rtw89_parse_logical_efuse_block_be(rtwdev, phy_map, phy_size,
						 RTW89_EFUSE_BLOCK_RF);
	if (ret) {
		rtw89_warn(rtwdev, "failed to parse efuse logic block %d\n",
			   RTW89_EFUSE_BLOCK_RF);
		goto out_free;
	}

out_free:
	kfree(dav_phy_map);
	kfree(phy_map);

	return ret;
}

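/*
 * Dump the PHY capability region of the physical eFuse and hand it to the
 * chip's ->read_phycap() handler; chips with phycap_size == 0 are skipped.
 */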
int rtw89_parse_phycap_map_be(struct rtw89_dev *rtwdev)
{
	u32 phycap_addr = rtwdev->chip->phycap_addr;
	u32 phycap_size = rtwdev->chip->phycap_size;
	u8 *phycap_map = NULL;
	int ret = 0;

	if (!phycap_size)
		return 0;

	phycap_map = kmalloc(phycap_size, GFP_KERNEL);
	if (!phycap_map)
		return -ENOMEM;

	ret = rtw89_dump_physical_efuse_map_be(rtwdev, phycap_map,
					       phycap_addr, phycap_size, false);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump phycap map\n");
		goto out_free;
	}

	ret = rtwdev->chip->ops->read_phycap(rtwdev, phycap_map);
	if (ret) {
		rtw89_warn(rtwdev, "failed to read phycap map\n");
		goto out_free;
	}

out_free:
	kfree(phycap_map);

	return ret;
}

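/*
 * Decode the secure-boot crypto selection word. The low byte must be
 * mirror-symmetric (bit n equal to bit 7 - n) with exactly one cleared
 * pair, selecting a group of 16 indices; the nibbles of the high byte must
 * be bitwise complements, selecting the index within that group. Any
 * inconsistency returns U16_MAX.
 */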
static u16 get_sb_cryp_sel_idx(u16 sb_cryp_sel)
{
	u8 low_bit, high_bit, cnt_zero = 0;
	u8 idx, sel_form_v, sel_idx_v;
	u16 sb_cryp_sel_v = 0x0;

	sel_form_v = u16_get_bits(sb_cryp_sel, MASKBYTE0);
	sel_idx_v = u16_get_bits(sb_cryp_sel, MASKBYTE1);

	for (idx = 0; idx < 4; idx++) {
		low_bit = !!(sel_form_v & BIT(idx));
		high_bit = !!(sel_form_v & BIT(7 - idx));
		if (low_bit != high_bit)
			return U16_MAX;
		if (low_bit)
			continue;

		cnt_zero++;
		if (cnt_zero == 1)
			sb_cryp_sel_v = idx * 16;
		else if (cnt_zero > 1)
			return U16_MAX;
	}

	low_bit = u8_get_bits(sel_idx_v, 0x0F);
	high_bit = u8_get_bits(sel_idx_v, 0xF0);

	if ((low_bit ^ high_bit) != 0xF)
		return U16_MAX;

	return sb_cryp_sel_v + low_bit;
}

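/*
 * Read the secure-boot words from the physical eFuse. When the crypto
 * selection word is programmed (not 0xFFFF) and decodes to a valid index,
 * record the matching sb_sel_mgn entry, parse the MSS info bytes and mark
 * the firmware as secure-boot enabled.
 */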
int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 sec_addr = EFUSE_SEC_BE_START;
	u32 sec_size = EFUSE_SEC_BE_SIZE;
	u16 sb_cryp_sel, sb_cryp_sel_idx;
	u8 sec_map[EFUSE_SEC_BE_SIZE];
	u8 b1, b2;
	int ret;

	ret = rtw89_dump_physical_efuse_map_be(rtwdev, sec_map,
					       sec_addr, sec_size, false);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump secsel map\n");
		return ret;
	}

	sb_cryp_sel = sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr] |
		      sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr + 1] << 8;
	if (sb_cryp_sel == EFUSE_SB_CRYP_SEL_DEFAULT)
		goto out;

	sb_cryp_sel_idx = get_sb_cryp_sel_idx(sb_cryp_sel);
	if (sb_cryp_sel_idx >= SB_SEL_MGN_MAX_SIZE) {
		rtw89_warn(rtwdev, "invalid SB cryp sel idx %d\n", sb_cryp_sel_idx);
		goto out;
	}

	sec->sb_sel_mgn = sb_sel_mgn[sb_cryp_sel_idx];

	b1 = sec_map[EFUSE_EXTERNALPN_ADDR_BE - sec_addr];
	b2 = sec_map[EFUSE_SERIALNUM_ADDR_BE - sec_addr];

	ret = rtw89_efuse_recognize_mss_info_v1(rtwdev, b1, b2);
	if (ret)
		goto out;

	sec->secure_boot = true;

out:
	rtw89_debug(rtwdev, RTW89_DBG_FW,
		    "MSS secure_boot=%d dev_type=%d cust_idx=%d key_num=%d\n",
		    sec->secure_boot, sec->mss_dev_type, sec->mss_cust_idx,
		    sec->mss_key_num);

	return 0;
}