// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

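/* The PHY direct-access window sits at a different register base
 * depending on the negotiated PCIe link speed, so read the link status
 * from config space and pick the Gen1 or Gen2 offset accordingly.
 */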
static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
						  u32 *phy_offset)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u32 val;
	int ret;

	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
	if (ret)
		return ret;

	val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
	if (val == RTW89_PCIE_GEN1_SPEED) {
		*phy_offset = R_RAC_DIRECT_OFFSET_G1;
	} else if (val == RTW89_PCIE_GEN2_SPEED) {
		*phy_offset = R_RAC_DIRECT_OFFSET_G2;
	} else {
		rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
		return -EFAULT;
	}

	return 0;
}

static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	return ret;
}

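/* Compute how many ring entries the hardware has consumed since we last
 * looked: for TX, advance from the software read pointer; for RX, from
 * the write pointer (plus one slot when the "equal means full" scheme is
 * in use). Both cases wrap modulo the ring length.
 */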
static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx) {
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	} else {
		if (info->rx_ring_eq_is_full)
			wp += 1;

		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
	}

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

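/* Move completed H2C skbs from h2c_queue onto h2c_release_queue, then
 * unmap and free them. Unless release_all is set, the most recent
 * RTW89_PCI_MULTITAG entries are kept around, presumably because the
 * hardware may still reference the latest commands.
 */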
static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	struct rtw89_pci_rxbd_info *rxbd_info;
	__le32 info;

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	info = rxbd_info->dword;

	rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
}

static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 target_rx_tag;

	if (!info->check_rx_tag)
		return 0;

	/* valid range is 1 ~ 0x1FFF */
	if (rx_ring->target_rx_tag == 0)
		target_rx_tag = 1;
	else
		target_rx_tag = rx_ring->target_rx_tag;

	if (rx_info->tag != target_rx_tag) {
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
			    rx_info->tag, target_rx_tag);
		return -EAGAIN;
	}

	return 0;
}

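/* Sync the RX buffer for CPU access and parse its RXBD info. The device
 * may not have finished writing the descriptor when we get here, so
 * retry (bounded) until the RX tag matches the expected sequence number.
 */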
static
int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
							struct rtw89_pci_rx_ring *rx_ring,
							struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	int rx_tag_retry = 100;
	int ret;

	do {
		rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
		rtw89_pci_rxbd_info_update(rtwdev, skb);

		ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
		if (ret != -EAGAIN)
			break;
	} while (rx_tag_retry--);

	/* update target rx_tag for next RX */
	rx_ring->target_rx_tag = rx_info->tag + 1;

	return ret;
}

static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;

	if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

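/* Pick the index of the next RX buffer to process. With the "equal
 * means full" scheme, the entry to process sits one slot past the
 * software write pointer, with wrap-around.
 */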
static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
				    struct rtw89_pci_dma_ring *bd_ring)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 wp = bd_ring->wp;

	if (!info->rx_ring_eq_is_full)
		return wp;

	if (++wp >= bd_ring->len)
		wp = 0;

	return wp;
}

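/* Reassemble one received frame from the RXQ ring. The first segment
 * (FS) carries the RX descriptor and determines the target skb size;
 * follow-up segments are appended until the last segment (LS) hands the
 * completed skb to rtw89_core_rx().
 */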
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct rtw89_pci_rx_info *rx_info;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];

	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = info->no_rxbd_fs ? !new : rx_info->fs;
	ls = rx_info->ls;

	if (unlikely(!fs || !ls))
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
			    "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n",
			    fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0);

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset + desc_info->rxd_len;
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* When flushing pending SKBs, the countdown may be exceeded. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct ieee80211_tx_info *info;

	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

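/* Walk the TX BD ring for entries the hardware has completed and take
 * their TXWD pages off the busy list. A page whose skb queue is already
 * empty was fully released by an RPP report and can go back to the free
 * WD ring immediately.
 */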
static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, an RPP can arrive before the TX BD has
		 * been updated. In normal mode that should not happen, so
		 * give it a warning.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

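/* Process one RPQ buffer. After the RX descriptor, the buffer carries an
 * array of RPP reports; each report names a TXWD whose skbs can now be
 * completed with the reported TX status.
 */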
static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];

	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset + desc_info.rxd_len;
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
			      rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
			rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);

void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* Prevent the RXQ from getting stuck due to running out of budget. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

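/* Threaded half of the PCI interrupt. The hard handler below only masks
 * interrupts and wakes this thread, which recognizes and acks the ISR
 * bits, services error/recovery events, and either polls directly (low
 * power) or schedules NAPI.
 */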
static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* If an interrupt event is already in flight, it can still fire even
	 * though we have done pci_stop() to turn off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

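/* The DEF_*CHADDRS macros expand to per-channel register address sets
 * (BD number/index/base-address registers). The TYPE1/TYPE2 variants
 * cover channels and chip generations whose register naming deviates
 * from the common pattern.
 */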
#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
	[RTW89_TXCH_##ch_idx] = { \
		.num = R_##gen##_##txch##_TXBD_NUM ##v, \
		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
		.bdram = 0, \
		.desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
	[RTW89_RXCH_##ch_idx] = { \
		.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
		.idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ),
		DEF_RXCHADDRS(AX, RPQ, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
		DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
	.tx = {
		DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
		DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* one descriptor is reserved to tell a full ring from an empty one */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (txch != RTW89_TXCH_CH12)
		cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	enum rtw89_debug_mask debug_mask;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0) {
		/* This message can show up frequently in low power mode, or
		 * under high traffic on chips with a small FIFO. We consider
		 * that normal behavior, so print it with the RTW89_DBG_TXRX
		 * mask in those situations.
		 */
		if (rtwpci->low_power || chip->small_fifo_size)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
	}

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is somewhat dynamic, it's hard to
	 * define a reasonable fixed total timeout for the read_poll_timeout*
	 * helpers. Instead, bound the number of polls and just use a for loop
	 * with udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush the FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

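/* Fill the TX address info that follows the WD body. This original
 * format packs the whole buffer into a single entry with the MSDU
 * last-segment bit set; the v1 variant below may split one mapping
 * across several fixed-size entries.
 */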
u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
	__le16 option;

	txaddr_info->length = cpu_to_le16(total_len);
	option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
	option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
	txaddr_info->option = option;
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		length_option |= u16_encode_bits(upper_32_bits(dma),
						 B_PCIADDR_HIGH_SEL_V1_MASK);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);

static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;
	rcu_assign_pointer(skb_data->wait, NULL);

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? chip->txwd_info_size : 0;

	txwp_info = txwd->vaddr + txwd_len;
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;
	__le16 opt;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
	opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
	txbd->opt = opt;
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	__le16 opt;
	int ret;

	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
	 * buffer with WD BODY only. So here we don't need to check the free
	 * pages of the wd ring.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
	opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
	txbd->opt = opt;
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}

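/* BD RAM layout tables: each entry appears to describe a TX channel's
 * slice of the shared BD RAM (start index plus max/min occupancy),
 * programmed into the per-channel BDRAM control register when the rings
 * are reset.
 */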
const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_dual);

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);

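/* On chips that expose wp_sel_addr, program the 16 write-pointer
 * selection bytes with the identity mapping 0..15, four bytes per 32-bit
 * register.
 */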
static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 addr = info->wp_sel_addr;
	u32 val;
	int i;

	if (!info->wp_sel_addr)
		return;

	for (i = 0; i < 16; i += 4) {
		val = u32_encode_bits(i + 0, MASKBYTE0) |
		      u32_encode_bits(i + 1, MASKBYTE1) |
		      u32_encode_bits(i + 2, MASKBYTE2) |
		      u32_encode_bits(i + 3, MASKBYTE3);
		rtw89_write32(rtwdev, addr + i, val);
	}
}

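/* Reset all TX/RX rings to their power-on state: zero the software
 * read/write pointers and reprogram the ring length, BD RAM layout and
 * DMA base addresses into the hardware.
 */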
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_idx;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		if (addr_bdram && bd_ram) {
			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

			rtw89_write32(rtwdev, addr_bdram, val32);
		}
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
		rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_idx = bd_ring->addr.idx;
		addr_desa_l = bd_ring->addr.desa_l;
		if (info->rx_ring_eq_is_full)
			bd_ring->wp = bd_ring->len - 1;
		else
			bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;
		rx_ring->target_rx_tag = 0;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
		rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));

		if (info->rx_ring_eq_is_full)
			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
	}

	rtw89_pci_init_wp_16sel(rtwdev);
}
1676
1677 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1678 struct rtw89_pci_tx_ring *tx_ring)
1679 {
1680 rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1681 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1682 }
1683
1684 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1685 {
1686 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1687 const struct rtw89_pci_info *info = rtwdev->pci_info;
1688 int txch;
1689
1690 rtw89_pci_reset_trx_rings(rtwdev);
1691
1692 spin_lock_bh(&rtwpci->trx_lock);
1693 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1694 if (info->tx_dma_ch_mask & BIT(txch))
1695 continue;
1696 if (txch == RTW89_TXCH_CH12) {
1697 rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1698 skb_queue_len(&rtwpci->h2c_queue), true);
1699 continue;
1700 }
1701 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
1702 }
1703 spin_unlock_bh(&rtwpci->trx_lock);
1704 }
1705
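/* Interrupt gating helpers: irq_lock keeps the "running" flag and the
 * actual interrupt mask update atomic with respect to the ISR, which
 * presumably consults rtwpci->running before re-arming interrupts.
 */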
1706 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1707 {
1708 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1709 unsigned long flags;
1710
1711 spin_lock_irqsave(&rtwpci->irq_lock, flags);
1712 rtwpci->running = true;
1713 rtw89_chip_enable_intr(rtwdev, rtwpci);
1714 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1715 }
1716
1717 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1718 {
1719 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1720 unsigned long flags;
1721
1722 spin_lock_irqsave(&rtwpci->irq_lock, flags);
1723 rtwpci->running = false;
1724 rtw89_chip_disable_intr(rtwdev, rtwpci);
1725 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1726 }
1727
1728 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1729 {
1730 rtw89_core_napi_start(rtwdev);
1731 rtw89_pci_enable_intr_lock(rtwdev);
1732
1733 return 0;
1734 }
1735
1736 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1737 {
1738 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1739 struct pci_dev *pdev = rtwpci->pdev;
1740
1741 rtw89_pci_disable_intr_lock(rtwdev);
1742 synchronize_irq(pdev->irq);
1743 rtw89_core_napi_stop(rtwdev);
1744 }
1745
1746 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1747 {
1748 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1749 struct pci_dev *pdev = rtwpci->pdev;
1750
1751 if (pause) {
1752 rtw89_pci_disable_intr_lock(rtwdev);
1753 synchronize_irq(pdev->irq);
1754 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1755 napi_synchronize(&rtwdev->napi);
1756 } else {
1757 rtw89_pci_enable_intr_lock(rtwdev);
1758 rtw89_pci_tx_kick_off_pending(rtwdev);
1759 }
1760 }
1761
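/* While in low power mode the BD index registers live at alternate
 * addresses, so every TX/RX ring is switched between the low-power
 * table (bd_idx_addr_low_power) and the normal dma_addr_set. Only HCIs
 * that implement low power mode provide the alternate table, hence the
 * WARN below.
 */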
1762 static
1763 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1764 {
1765 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1766 const struct rtw89_pci_info *info = rtwdev->pci_info;
1767 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1768 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1769 struct rtw89_pci_tx_ring *tx_ring;
1770 struct rtw89_pci_rx_ring *rx_ring;
1771 int i;
1772
1773 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1774 return;
1775
1776 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1777 tx_ring = &rtwpci->tx_rings[i];
1778 tx_ring->bd_ring.addr.idx = low_power ?
1779 bd_idx_addr->tx_bd_addrs[i] :
1780 dma_addr_set->tx[i].idx;
1781 }
1782
1783 for (i = 0; i < RTW89_RXCH_NUM; i++) {
1784 rx_ring = &rtwpci->rx_rings[i];
1785 rx_ring->bd_ring.addr.idx = low_power ?
1786 bd_idx_addr->rx_bd_addrs[i] :
1787 dma_addr_set->rx[i].idx;
1788 }
1789 }
1790
1791 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1792 {
1793 enum rtw89_pci_intr_mask_cfg cfg;
1794
1795 WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1796
1797 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1798 rtw89_chip_config_intr_mask(rtwdev, cfg);
1799 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1800 }
1801
1802 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1803
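/* CMAC register reads return RTW89_R32_DEAD while the CMAC clocks are
 * gated. Retry up to MAC_REG_POOL_COUNT times, re-enabling the CMAC
 * clocks between attempts, before giving up and returning the dead
 * value.
 */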
1804 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
1805 {
1806 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1807 u32 val = readl(rtwpci->mmap + addr);
1808 int count;
1809
1810 for (count = 0; ; count++) {
1811 if (val != RTW89_R32_DEAD)
1812 return val;
1813 if (count >= MAC_REG_POOL_COUNT) {
1814 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
1815 return RTW89_R32_DEAD;
1816 }
1817 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
1818 val = readl(rtwpci->mmap + addr);
1819 }
1820
1821 return val;
1822 }
1823
1824 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
1825 {
1826 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1827 u32 addr32, val32, shift;
1828
1829 if (!ACCESS_CMAC(addr))
1830 return readb(rtwpci->mmap + addr);
1831
1832 addr32 = addr & ~0x3;
1833 shift = (addr & 0x3) * 8;
1834 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1835 return val32 >> shift;
1836 }
1837
1838 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
1839 {
1840 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1841 u32 addr32, val32, shift;
1842
1843 if (!ACCESS_CMAC(addr))
1844 return readw(rtwpci->mmap + addr);
1845
1846 addr32 = addr & ~0x3;
1847 shift = (addr & 0x3) * 8;
1848 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1849 return val32 >> shift;
1850 }
1851
1852 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
1853 {
1854 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1855
1856 if (!ACCESS_CMAC(addr))
1857 return readl(rtwpci->mmap + addr);
1858
1859 return rtw89_pci_ops_read32_cmac(rtwdev, addr);
1860 }
1861
1862 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
1863 {
1864 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1865
1866 writeb(data, rtwpci->mmap + addr);
1867 }
1868
1869 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
1870 {
1871 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1872
1873 writew(data, rtwpci->mmap + addr);
1874 }
1875
1876 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
1877 {
1878 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1879
1880 writel(data, rtwpci->mmap + addr);
1881 }
1882
1883 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
1884 {
1885 const struct rtw89_pci_info *info = rtwdev->pci_info;
1886
1887 if (enable)
1888 rtw89_write32_set(rtwdev, info->init_cfg_reg,
1889 info->rxhci_en_bit | info->txhci_en_bit);
1890 else
1891 rtw89_write32_clr(rtwdev, info->init_cfg_reg,
1892 info->rxhci_en_bit | info->txhci_en_bit);
1893 }
1894
1895 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
1896 {
1897 const struct rtw89_pci_info *info = rtwdev->pci_info;
1898 const struct rtw89_reg_def *reg = &info->dma_io_stop;
1899
1900 if (enable)
1901 rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
1902 else
1903 rtw89_write32_set(rtwdev, reg->addr, reg->mask);
1904 }
1905
1906 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
1907 {
1908 rtw89_pci_ctrl_dma_io(rtwdev, enable);
1909 rtw89_pci_ctrl_dma_trx(rtwdev, enable);
1910 }
1911
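/* Start an indirect MDIO transaction to the PCIe PHY: select the
 * register page for the given PHY generation (registers below 0x20 map
 * to page 0, the rest to page 1), raise the read or write flag, and
 * poll until hardware clears it.
 */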
1912 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
1913 {
1914 u16 val;
1915
1916 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1917
1918 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
1919 switch (speed) {
1920 case PCIE_PHY_GEN1:
1921 if (addr < 0x20)
1922 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
1923 else
1924 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
1925 break;
1926 case PCIE_PHY_GEN2:
1927 if (addr < 0x20)
1928 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
1929 else
1930 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
1931 break;
1932 default:
1933 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
1934 return -EINVAL;
1935 }
1936 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
1937 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1938
1939 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
1940 false, rtwdev, R_AX_MDIO_CFG);
1941 }
1942
1943 static int
1944 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
1945 {
1946 int ret;
1947
1948 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
1949 if (ret) {
1950 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1951 return ret;
1952 }
1953 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
1954
1955 return 0;
1956 }
1957
1958 static int
1959 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
1960 {
1961 int ret;
1962
1963 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
1964 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
1965 if (ret) {
1966 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1967 return ret;
1968 }
1969
1970 return 0;
1971 }
1972
1973 static int
1974 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
1975 {
1976 u32 shift;
1977 int ret;
1978 u16 val;
1979
1980 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1981 if (ret)
1982 return ret;
1983
1984 shift = __ffs(mask);
1985 val &= ~mask;
1986 val |= ((data << shift) & mask);
1987
1988 ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
1989 if (ret)
1990 return ret;
1991
1992 return 0;
1993 }
1994
1995 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1996 {
1997 int ret;
1998 u16 val;
1999
2000 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
2001 if (ret)
2002 return ret;
2003 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
2004 if (ret)
2005 return ret;
2006
2007 return 0;
2008 }
2009
2010 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
2011 {
2012 int ret;
2013 u16 val;
2014
2015 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
2016 if (ret)
2017 return ret;
2018 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
2019 if (ret)
2020 return ret;
2021
2022 return 0;
2023 }
2024
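/* DBI accessors: a backdoor into the device's own PCI configuration
 * space through MAC registers, for when normal config cycles cannot be
 * used. The write-enable mask selects which byte of the aligned dword
 * is updated; completion is signalled by the flag byte clearing.
 */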
2025 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
2026 {
2027 u16 addr_2lsb = addr & B_AX_DBI_2LSB;
2028 u16 write_addr;
2029 u8 flag;
2030 int ret;
2031
2032 write_addr = addr & B_AX_DBI_ADDR_MSK;
2033 write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
2034 rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
2035 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
2036 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
2037
2038 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
2039 10 * RTW89_PCI_WR_RETRY_CNT, false,
2040 rtwdev, R_AX_DBI_FLAG + 2);
2041 if (ret)
2042 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
2043 addr);
2044
2045 return ret;
2046 }
2047
2048 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
2049 {
2050 u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
2051 u8 flag;
2052 int ret;
2053
2054 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
2055 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
2056
2057 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
2058 10 * RTW89_PCI_WR_RETRY_CNT, false,
2059 rtwdev, R_AX_DBI_FLAG + 2);
2060 if (ret) {
2061 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
2062 addr);
2063 return ret;
2064 }
2065
2066 read_addr = R_AX_DBI_RDATA + (addr & 3);
2067 *value = rtw89_read8(rtwdev, read_addr);
2068
2069 return 0;
2070 }
2071
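/* Config-space helpers with a fallback: try a standard PCI config
 * cycle first and, on chips that expose DBI (8852A and the 885xB
 * family), retry through the DBI backdoor if the bus access fails.
 */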
2072 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2073 u8 data)
2074 {
2075 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2076 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2077 struct pci_dev *pdev = rtwpci->pdev;
2078 int ret;
2079
2080 ret = pci_write_config_byte(pdev, addr, data);
2081 if (!ret)
2082 return 0;
2083
2084 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
2085 ret = rtw89_dbi_write8(rtwdev, addr, data);
2086
2087 return ret;
2088 }
2089
2090 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2091 u8 *value)
2092 {
2093 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2094 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2095 struct pci_dev *pdev = rtwpci->pdev;
2096 int ret;
2097
2098 ret = pci_read_config_byte(pdev, addr, value);
2099 if (!ret)
2100 return 0;
2101
2102 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
2103 ret = rtw89_dbi_read8(rtwdev, addr, value);
2104
2105 return ret;
2106 }
2107
2108 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
2109 u8 bit)
2110 {
2111 u8 value;
2112 int ret;
2113
2114 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2115 if (ret)
2116 return ret;
2117
2118 value |= bit;
2119 ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2120
2121 return ret;
2122 }
2123
2124 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
2125 u8 bit)
2126 {
2127 u8 value;
2128 int ret;
2129
2130 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2131 if (ret)
2132 return ret;
2133
2134 value &= ~bit;
2135 ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2136
2137 return ret;
2138 }
2139
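/* Measure the reference clock calibration target: pulse CLK_CALIB_EN
 * off and on to restart the counter, wait 300us, then read the 12-bit
 * count back. All-zero and all-one counts are rejected as failed
 * measurements.
 */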
2140 static int
2141 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
2142 {
2143 u16 val, tar;
2144 int ret;
2145
2146 /* Enable counter */
2147 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
2148 if (ret)
2149 return ret;
2150 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2151 phy_rate);
2152 if (ret)
2153 return ret;
2154 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
2155 phy_rate);
2156 if (ret)
2157 return ret;
2158
2159 fsleep(300);
2160
2161 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
2162 if (ret)
2163 return ret;
2164 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2165 phy_rate);
2166 if (ret)
2167 return ret;
2168
2169 tar = tar & 0x0FFF;
2170 if (tar == 0 || tar == 0x0FFF) {
2171 rtw89_err(rtwdev, "[ERR]Get target failed.\n");
2172 return -EINVAL;
2173 }
2174
2175 *target = tar;
2176
2177 return 0;
2178 }
2179
2180 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
2181 {
2182 int ret;
2183
2184 if (!rtw89_is_rtl885xb(rtwdev))
2185 return 0;
2186
2187 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
2188 PCIE_AUTOK_4, PCIE_PHY_GEN1);
2189 return ret;
2190 }
2191
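/* Automatic reference clock calibration for 885xB parts. With L1
 * temporarily disabled, measure the target count, derive the margin as
 *   mgn = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar,
 * then choose a divider so the margin fits its 4-bit field (e.g. a
 * margin of 40 selects div 2 and stores 40 >> 2 = 10), program
 * (tar & 0x0FFF) | mgn << 12, and re-enable the calibration function.
 */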
2192 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
2193 {
2194 enum rtw89_pcie_phy phy_rate;
2195 u16 val16, mgn_set, div_set, tar;
2196 u8 val8, bdr_ori;
2197 bool l1_flag = false;
2198 int ret = 0;
2199
2200 if (!rtw89_is_rtl885xb(rtwdev))
2201 return 0;
2202
2203 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
2204 if (ret) {
2205 rtw89_err(rtwdev, "[ERR]pci config read %X\n",
2206 RTW89_PCIE_PHY_RATE);
2207 return ret;
2208 }
2209
2210 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
2211 phy_rate = PCIE_PHY_GEN1;
2212 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
2213 phy_rate = PCIE_PHY_GEN2;
2214 } else {
2215 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8);
2216 return -EOPNOTSUPP;
2217 }
2218 /* Disable L1BD */
2219 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
2220 if (ret) {
2221 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2222 return ret;
2223 }
2224
2225 if (bdr_ori & RTW89_PCIE_BIT_L1) {
2226 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2227 bdr_ori & ~RTW89_PCIE_BIT_L1);
2228 if (ret) {
2229 rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2230 RTW89_PCIE_L1_CTRL);
2231 return ret;
2232 }
2233 l1_flag = true;
2234 }
2235
2236 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2237 if (ret) {
2238 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2239 goto end;
2240 }
2241
2242 if (val16 & B_AX_CALIB_EN) {
2243 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
2244 val16 & ~B_AX_CALIB_EN, phy_rate);
2245 if (ret) {
2246 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2247 goto end;
2248 }
2249 }
2250
2251 if (!autook_en)
2252 goto end;
2253 /* Set div */
2254 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
2255 if (ret) {
2256 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2257 goto end;
2258 }
2259
2260 /* Obtain div and margin */
2261 ret = __get_target(rtwdev, &tar, phy_rate);
2262 if (ret) {
2263 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2264 goto end;
2265 }
2266
2267 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2268
2269 if (mgn_set >= 128) {
2270 div_set = 0x0003;
2271 mgn_set = 0x000F;
2272 } else if (mgn_set >= 64) {
2273 div_set = 0x0003;
2274 mgn_set >>= 3;
2275 } else if (mgn_set >= 32) {
2276 div_set = 0x0002;
2277 mgn_set >>= 2;
2278 } else if (mgn_set >= 16) {
2279 div_set = 0x0001;
2280 mgn_set >>= 1;
2281 } else if (mgn_set == 0) {
2282 rtw89_err(rtwdev, "[ERR]cal mgn is 0, tar = %d\n", tar);
2283 goto end;
2284 } else {
2285 div_set = 0x0000;
2286 }
2287
2288 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2289 if (ret) {
2290 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2291 goto end;
2292 }
2293
2294 val16 |= u16_encode_bits(div_set, B_AX_DIV);
2295
2296 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
2297 if (ret) {
2298 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2299 goto end;
2300 }
2301
2302 ret = __get_target(rtwdev, &tar, phy_rate);
2303 if (ret) {
2304 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
2305 goto end;
2306 }
2307
2308 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
2309 tar, div_set, mgn_set);
2310 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
2311 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
2312 if (ret) {
2313 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2314 goto end;
2315 }
2316
2317 /* Enable function */
2318 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
2319 if (ret) {
2320 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2321 goto end;
2322 }
2323
2324 /* CLK delay = 0 */
2325 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2326 PCIE_CLKDLY_HW_0);
2327
2328 end:
2329 /* Set L1BD to ori */
2330 if (l1_flag) {
2331 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2332 bdr_ori);
2333 if (ret) {
2334 rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2335 RTW89_PCIE_L1_CTRL);
2336 return ret;
2337 }
2338 }
2339
2340 return ret;
2341 }
2342
2343 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
2344 {
2345 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2346 int ret;
2347
2348 if (chip_id == RTL8852A) {
2349 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2350 PCIE_PHY_GEN1);
2351 if (ret)
2352 return ret;
2353 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2354 PCIE_PHY_GEN2);
2355 if (ret)
2356 return ret;
2357 } else if (chip_id == RTL8852C) {
2358 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * RAC_MULT,
2359 B_AX_DEGLITCH);
2360 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * RAC_MULT,
2361 B_AX_DEGLITCH);
2362 }
2363
2364 return 0;
2365 }
2366
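/* Disable the PCIe PHY equalizer on 8852C when OOBS is not yet selected
 * for both PHY generations: with ASPM temporarily cleared, sample the
 * OOBS level and offset calibration from the currently linked PHY, then
 * mirror them into both the Gen1 and Gen2 register banks as manual
 * values. The backed-up ASPM setting is restored on exit.
 */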
2367 static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev)
2368 {
2369 u16 g1_oobs, g2_oobs;
2370 u32 backup_aspm;
2371 u32 phy_offset;
2372 u16 offset_cal;
2373 u16 oobs_val;
2374 int ret;
2375 u8 gen;
2376
2377 if (rtwdev->chip->chip_id != RTL8852C)
2378 return;
2379
2380 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2381 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2382 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
2383 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2384 if (g1_oobs && g2_oobs)
2385 return;
2386
2387 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
2388 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
2389
2390 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset);
2391 if (ret)
2392 goto out;
2393
2394 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN);
2395 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
2396 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
2397
2398 oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
2399 OOBS_LEVEL_MASK);
2400
2401 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT,
2402 OOBS_SEN_MASK, oobs_val);
2403 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
2404 BAC_OOBS_SEL);
2405
2406 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT,
2407 OOBS_SEN_MASK, oobs_val);
2408 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
2409 BAC_OOBS_SEL);
2410
2411 /* offset K */
2412 for (gen = 1; gen <= 2; gen++) {
2413 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 :
2414 R_RAC_DIRECT_OFFSET_G2;
2415
2416 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
2417 B_PCIE_BIT_RD_SEL);
2418 }
2419
2420 offset_cal = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2421 RAC_ANA1F * RAC_MULT, OFFSET_CAL_MASK);
2422
2423 for (gen = 1; gen <= 2; gen++) {
2424 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 :
2425 R_RAC_DIRECT_OFFSET_G2;
2426
2427 rtw89_write16_mask(rtwdev, phy_offset + RAC_ANA0B * RAC_MULT,
2428 MANUAL_LVL_MASK, offset_cal);
2429 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT,
2430 OFFSET_CAL_MODE);
2431 }
2432
2433 out:
2434 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm);
2435 }
2436
2437 static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
2438 {
2439 u32 phy_offset;
2440
2441 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
2442 return;
2443
2444 phy_offset = R_RAC_DIRECT_OFFSET_G1;
2445 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
2446 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2447
2448 phy_offset = R_RAC_DIRECT_OFFSET_G2;
2449 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
2450 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2451 }
2452
2453 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2454 {
2455 if (rtwdev->chip->chip_id != RTL8852A)
2456 return;
2457
2458 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2459 }
2460
2461 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2462 {
2463 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2464
2465 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
2466 return;
2467
2468 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2469 }
2470
2471 static int rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2472 {
2473 int ret;
2474
2475 if (rtwdev->chip->chip_id != RTL8852A)
2476 return 0;
2477
2478 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2479 PCIE_PHY_GEN1);
2480 if (ret)
2481 return ret;
2482
2483 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2484 PCIE_PHY_GEN2);
2485 if (ret)
2486 return ret;
2487
2488 return 0;
2489 }
2490
2491 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2492 {
2493 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2494
2495 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
2496 return;
2497
2498 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2499 }
2500
2501 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2502 {
2503 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2504
2505 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2506 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2507 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2508 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2509 B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2510 } else if (rtwdev->chip->chip_id == RTL8852C) {
2511 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2512 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2513 }
2514 }
2515
2516 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2517 {
2518 if (!rtw89_is_rtl885xb(rtwdev))
2519 return 0;
2520
2521 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2522 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2523 }
2524
2525 static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
2526 {
2527 if (pwr_up)
2528 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2529 else
2530 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2531 }
2532
2533 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2534 {
2535 if (rtwdev->chip->chip_id != RTL8852C)
2536 return;
2537
2538 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2539 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2540 }
2541
2542 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2543 {
2544 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2545 return;
2546
2547 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2548 }
2549
2550 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2551 {
2552 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2553 return;
2554
2555 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2556 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2557 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2558 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2559 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2560 }
2561
2562 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2563 {
2564 if (rtwdev->chip->chip_id != RTL8852C)
2565 return;
2566
2567 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2568 }
2569
2570 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2571 {
2572 if (rtwdev->chip->chip_id != RTL8852C)
2573 return;
2574
2575 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2576 }
2577
2578 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2579 {
2580 if (rtwdev->chip->chip_id == RTL8852C)
2581 return;
2582
2583 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2584 B_AX_SIC_EN_FORCE_CLKREQ);
2585 }
2586
2587 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2588 {
2589 const struct rtw89_pci_info *info = rtwdev->pci_info;
2590 u32 lbc;
2591
2592 if (rtwdev->chip->chip_id == RTL8852C)
2593 return;
2594
2595 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2596 if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2597 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2598 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2599 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2600 } else {
2601 lbc &= ~B_AX_LBC_EN;
2602 }
2603 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2604 }
2605
2606 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2607 {
2608 const struct rtw89_pci_info *info = rtwdev->pci_info;
2609 u32 val32;
2610
2611 if (rtwdev->chip->chip_id != RTL8852C)
2612 return;
2613
2614 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2615 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2616 info->io_rcy_tmr);
2617 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2618 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2619 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2620
2621 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2622 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2623 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2624 } else {
2625 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2626 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2627 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2628 }
2629
2630 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2631 }
2632
2633 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2634 {
2635 if (rtwdev->chip->chip_id == RTL8852C)
2636 return;
2637
2638 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2639 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2640
2641 if (rtwdev->chip->chip_id == RTL8852A)
2642 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2643 B_AX_EN_CHKDSC_NO_RX_STUCK);
2644 }
2645
2646 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2647 {
2648 if (rtwdev->chip->chip_id == RTL8852C)
2649 return;
2650
2651 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2652 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2653 }
2654
2655 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
2656 {
2657 const struct rtw89_pci_info *info = rtwdev->pci_info;
2658 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2659 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2660 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2661 B_AX_CLR_CH12_IDX;
2662 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2663 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2664
2665 if (chip_id == RTL8852A || chip_id == RTL8852C)
2666 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2667 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2668 /* clear DMA indexes */
2669 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
2670 if (chip_id == RTL8852A || chip_id == RTL8852C)
2671 rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
2672 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2673 rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
2674 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2675 }
2676
2677 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2678 {
2679 const struct rtw89_pci_info *info = rtwdev->pci_info;
2680 u32 dma_busy1 = info->dma_busy1.addr;
2681 u32 dma_busy2 = info->dma_busy2_reg;
2682 u32 check, dma_busy;
2683 int ret;
2684
2685 check = info->dma_busy1.mask;
2686
2687 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2688 10, 100, false, rtwdev, dma_busy1);
2689 if (ret)
2690 return ret;
2691
2692 if (!dma_busy2)
2693 return 0;
2694
2695 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2696
2697 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2698 10, 100, false, rtwdev, dma_busy2);
2699 if (ret)
2700 return ret;
2701
2702 return 0;
2703 }
2704
2705 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2706 {
2707 const struct rtw89_pci_info *info = rtwdev->pci_info;
2708 u32 dma_busy3 = info->dma_busy3_reg;
2709 u32 check, dma_busy;
2710 int ret;
2711
2712 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2713
2714 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2715 10, 100, false, rtwdev, dma_busy3);
2716 if (ret)
2717 return ret;
2718
2719 return 0;
2720 }
2721
2722 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2723 {
2724 int ret;
2725
2726 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
2727 if (ret) {
2728 rtw89_err(rtwdev, "txdma ch busy\n");
2729 return ret;
2730 }
2731
2732 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
2733 if (ret) {
2734 rtw89_err(rtwdev, "rxdma ch busy\n");
2735 return ret;
2736 }
2737
2738 return 0;
2739 }
2740
2741 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
2742 {
2743 const struct rtw89_pci_info *info = rtwdev->pci_info;
2744 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
2745 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
2746 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
2747 enum mac_ax_tag_mode tag_mode = info->tag_mode;
2748 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
2749 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
2750 enum mac_ax_tx_burst tx_burst = info->tx_burst;
2751 enum mac_ax_rx_burst rx_burst = info->rx_burst;
2752 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2753 u8 cv = rtwdev->hal.cv;
2754 u32 val32;
2755
2756 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2757 if (chip_id == RTL8852A && cv == CHIP_CBV)
2758 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2759 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2760 if (chip_id == RTL8852A || chip_id == RTL8852B)
2761 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2762 }
2763
2764 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
2765 if (chip_id == RTL8852A && cv == CHIP_CBV)
2766 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2767 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
2768 if (chip_id == RTL8852A || chip_id == RTL8852B)
2769 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2770 }
2771
2772 if (rxbd_mode == MAC_AX_RXBD_PKT) {
2773 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2774 } else if (rxbd_mode == MAC_AX_RXBD_SEP) {
2775 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2776
2777 if (chip_id == RTL8852A || chip_id == RTL8852B)
2778 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
2779 B_AX_PCIE_RX_APPLEN_MASK, 0);
2780 }
2781
2782 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2783 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
2784 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
2785 } else if (chip_id == RTL8852C) {
2786 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
2787 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
2788 }
2789
2790 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2791 if (tag_mode == MAC_AX_TAG_SGL) {
2792 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
2793 ~B_AX_LATENCY_CONTROL;
2794 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2795 } else if (tag_mode == MAC_AX_TAG_MULTI) {
2796 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
2797 B_AX_LATENCY_CONTROL;
2798 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2799 }
2800 }
2801
2802 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
2803 info->multi_tag_num);
2804
2805 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2806 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
2807 wd_dma_idle_intvl);
2808 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
2809 wd_dma_act_intvl);
2810 } else if (chip_id == RTL8852C) {
2811 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
2812 wd_dma_idle_intvl);
2813 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
2814 wd_dma_act_intvl);
2815 }
2816
2817 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2818 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2819 B_AX_HOST_ADDR_INFO_8B_SEL);
2820 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2821 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2822 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2823 B_AX_HOST_ADDR_INFO_8B_SEL);
2824 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2825 }
2826
2827 return 0;
2828 }
2829
2830 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
2831 {
2832 const struct rtw89_pci_info *info = rtwdev->pci_info;
2833
2834 rtw89_pci_power_wake(rtwdev, false);
2835
2836 if (rtwdev->chip->chip_id == RTL8852A) {
2837 /* ltr sw trigger */
2838 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
2839 }
2840 info->ltr_set(rtwdev, false);
2841 rtw89_pci_ctrl_dma_all(rtwdev, false);
2842 rtw89_pci_clr_idx_all(rtwdev);
2843
2844 return 0;
2845 }
2846
2847 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
2848 {
2849 const struct rtw89_pci_info *info = rtwdev->pci_info;
2850 int ret;
2851
2852 rtw89_pci_ber(rtwdev);
2853 rtw89_pci_rxdma_prefth(rtwdev);
2854 rtw89_pci_l1off_pwroff(rtwdev);
2855 rtw89_pci_deglitch_setting(rtwdev);
2856 ret = rtw89_pci_l2_rxen_lat(rtwdev);
2857 if (ret) {
2858 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2859 return ret;
2860 }
2861
2862 rtw89_pci_aphy_pwrcut(rtwdev);
2863 rtw89_pci_hci_ldo(rtwdev);
2864 rtw89_pci_dphy_delay(rtwdev);
2865
2866 ret = rtw89_pci_autok_x(rtwdev);
2867 if (ret) {
2868 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
2869 return ret;
2870 }
2871
2872 ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
2873 if (ret) {
2874 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
2875 return ret;
2876 }
2877
2878 rtw89_pci_power_wake_ax(rtwdev, true);
2879 rtw89_pci_autoload_hang(rtwdev);
2880 rtw89_pci_l12_vmain(rtwdev);
2881 rtw89_pci_gen2_force_ib(rtwdev);
2882 rtw89_pci_l1_ent_lat(rtwdev);
2883 rtw89_pci_wd_exit_l1(rtwdev);
2884 rtw89_pci_set_sic(rtwdev);
2885 rtw89_pci_set_lbc(rtwdev);
2886 rtw89_pci_set_io_rcy(rtwdev);
2887 rtw89_pci_set_dbg(rtwdev);
2888 rtw89_pci_set_keep_reg(rtwdev);
2889
2890 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
2891
2892 /* stop DMA activities */
2893 rtw89_pci_ctrl_dma_all(rtwdev, false);
2894
2895 ret = rtw89_pci_poll_dma_all_idle(rtwdev);
2896 if (ret) {
2897 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
2898 return ret;
2899 }
2900
2901 rtw89_pci_clr_idx_all(rtwdev);
2902 rtw89_pci_mode_op(rtwdev);
2903
2904 /* fill TRX BD indexes */
2905 rtw89_pci_ops_reset(rtwdev);
2906
2907 ret = rtw89_pci_rst_bdram_ax(rtwdev);
2908 if (ret) {
2909 rtw89_warn(rtwdev, "reset bdram busy\n");
2910 return ret;
2911 }
2912
2913 /* disable all channels except the FW CMD channel, which is used to download firmware */
2914 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
2915 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);
2916
2917 /* start DMA activities */
2918 rtw89_pci_ctrl_dma_all(rtwdev, true);
2919
2920 return 0;
2921 }
2922
2923 static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
2924 {
2925 rtw89_pci_power_wake_ax(rtwdev, false);
2926
2927 return 0;
2928 }
2929
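/* Program PCIe LTR (Latency Tolerance Reporting). Current register
 * contents are sanity-checked first (rtw89_pci_ltr_is_err_reg_val()
 * presumably screens out reads from a dead bus), then hardware-managed
 * LTR is enabled with a 500us reporting space, a 3.2ms idle timer, RX
 * thresholds of 0x28, and idle/active latency words that appear to
 * pack two 16-bit latency codes each.
 */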
2930 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
2931 {
2932 u32 val;
2933
2934 if (!en)
2935 return 0;
2936
2937 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2938 if (rtw89_pci_ltr_is_err_reg_val(val))
2939 return -EINVAL;
2940 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2941 if (rtw89_pci_ltr_is_err_reg_val(val))
2942 return -EINVAL;
2943 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
2944 if (rtw89_pci_ltr_is_err_reg_val(val))
2945 return -EINVAL;
2946 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
2947 if (rtw89_pci_ltr_is_err_reg_val(val))
2948 return -EINVAL;
2949
2950 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
2951 B_AX_LTR_WD_NOEMP_CHK);
2952 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
2953 PCI_LTR_SPC_500US);
2954 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2955 PCI_LTR_IDLE_TIMER_3_2MS);
2956 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2957 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2958 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
2959 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
2960
2961 return 0;
2962 }
2963 EXPORT_SYMBOL(rtw89_pci_ltr_set);
2964
2965 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
2966 {
2967 u32 dec_ctrl;
2968 u32 val32;
2969
2970 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2971 if (rtw89_pci_ltr_is_err_reg_val(val32))
2972 return -EINVAL;
2973 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2974 if (rtw89_pci_ltr_is_err_reg_val(val32))
2975 return -EINVAL;
2976 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
2977 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
2978 return -EINVAL;
2979 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
2980 if (rtw89_pci_ltr_is_err_reg_val(val32))
2981 return -EINVAL;
2982 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
2983 if (rtw89_pci_ltr_is_err_reg_val(val32))
2984 return -EINVAL;
2985
2986 if (!en) {
2987 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
2988 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
2989 B_AX_LTR_REQ_DRV;
2990 } else {
2991 dec_ctrl |= B_AX_LTR_HW_DEC_EN;
2992 }
2993
2994 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
2995 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
2996
2997 if (en)
2998 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
2999 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
3000 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
3001 PCI_LTR_IDLE_TIMER_3_2MS);
3002 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
3003 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
3004 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
3005 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
3006 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
3007
3008 return 0;
3009 }
3010 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
3011
3012 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
3013 {
3014 const struct rtw89_pci_info *info = rtwdev->pci_info;
3015 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3016 int ret;
3017
3018 ret = info->ltr_set(rtwdev, true);
3019 if (ret) {
3020 rtw89_err(rtwdev, "pci ltr set fail\n");
3021 return ret;
3022 }
3023 if (chip_id == RTL8852A) {
3024 /* ltr sw trigger */
3025 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
3026 }
3027 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3028 /* ADDR info 8-byte mode */
3029 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
3030 B_AX_HOST_ADDR_INFO_8B_SEL);
3031 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
3032 }
3033
3034 /* enable DMA for all queues */
3035 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
3036
3037 /* Release PCI IO */
3038 rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
3039 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
3040
3041 return 0;
3042 }
3043
3044 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
3045 struct pci_dev *pdev)
3046 {
3047 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3048 int ret;
3049
3050 ret = pci_enable_device(pdev);
3051 if (ret) {
3052 rtw89_err(rtwdev, "failed to enable pci device\n");
3053 return ret;
3054 }
3055
3056 pci_set_master(pdev);
3057 pci_set_drvdata(pdev, rtwdev->hw);
3058
3059 rtwpci->pdev = pdev;
3060
3061 return 0;
3062 }
3063
3064 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
3065 struct pci_dev *pdev)
3066 {
3067 pci_disable_device(pdev);
3068 }
3069
3070 static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev)
3071 {
3072 const struct rtw89_chip_info *chip = rtwdev->chip;
3073
3074 switch (chip->chip_id) {
3075 case RTL8852A:
3076 case RTL8852B:
3077 case RTL8851B:
3078 case RTL8852BT:
3079 return true;
3080 default:
3081 return false;
3082 }
3083 }
3084
3085 static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev)
3086 {
3087 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3088 struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev);
3089
3090 if (!rtw89_pci_chip_is_manual_dac(rtwdev))
3091 return true;
3092
3093 if (!bridge)
3094 return false;
3095
3096 switch (bridge->vendor) {
3097 case PCI_VENDOR_ID_INTEL:
3098 return true;
3099 case PCI_VENDOR_ID_ASMEDIA:
3100 if (bridge->device == 0x2806)
3101 return true;
3102 break;
3103 }
3104
3105 return false;
3106 }
3107
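/* On chips where DAC (36-bit DMA addressing) must be enabled by hand,
 * set the vendor-specific RTW89_PCIE_BIT_EN_64BITS bit once the DMA
 * mask has been raised above 32 bits.
 */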
3108 static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
3109 {
3110 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3111
3112 if (!rtwpci->enable_dac)
3113 return;
3114
3115 if (!rtw89_pci_chip_is_manual_dac(rtwdev))
3116 return;
3117
3118 rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS);
3119 }
3120
3121 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
3122 struct pci_dev *pdev)
3123 {
3124 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3125 unsigned long resource_len;
3126 u8 bar_id = 2;
3127 int ret;
3128
3129 ret = pci_request_regions(pdev, KBUILD_MODNAME);
3130 if (ret) {
3131 rtw89_err(rtwdev, "failed to request pci regions\n");
3132 goto err;
3133 }
3134
3135 if (!rtw89_pci_is_dac_compatible_bridge(rtwdev))
3136 goto no_dac;
3137
3138 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
3139 if (!ret) {
3140 rtwpci->enable_dac = true;
3141 rtw89_pci_cfg_dac(rtwdev);
3142 } else {
3143 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3144 if (ret) {
3145 rtw89_err(rtwdev,
3146 "failed to set dma and consistent mask to 32/36-bit\n");
3147 goto err_release_regions;
3148 }
3149 }
3150 no_dac:
3151
3152 resource_len = pci_resource_len(pdev, bar_id);
3153 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
3154 if (!rtwpci->mmap) {
3155 rtw89_err(rtwdev, "failed to map pci io\n");
3156 ret = -EIO;
3157 goto err_release_regions;
3158 }
3159
3160 return 0;
3161
3162 err_release_regions:
3163 pci_release_regions(pdev);
3164 err:
3165 return ret;
3166 }
3167
3168 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
3169 struct pci_dev *pdev)
3170 {
3171 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3172
3173 if (rtwpci->mmap) {
3174 pci_iounmap(pdev, rtwpci->mmap);
3175 pci_release_regions(pdev);
3176 }
3177 }
3178
3179 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
3180 struct pci_dev *pdev,
3181 struct rtw89_pci_tx_ring *tx_ring)
3182 {
3183 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3184 u8 *head = wd_ring->head;
3185 dma_addr_t dma = wd_ring->dma;
3186 u32 page_size = wd_ring->page_size;
3187 u32 page_num = wd_ring->page_num;
3188 u32 ring_sz = page_size * page_num;
3189
3190 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3191 wd_ring->head = NULL;
3192 }
3193
3194 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
3195 struct pci_dev *pdev,
3196 struct rtw89_pci_tx_ring *tx_ring)
3197 {
3198 int ring_sz;
3199 u8 *head;
3200 dma_addr_t dma;
3201
3202 head = tx_ring->bd_ring.head;
3203 dma = tx_ring->bd_ring.dma;
3204 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
3205 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3206
3207 tx_ring->bd_ring.head = NULL;
3208 }
3209
3210 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
3211 struct pci_dev *pdev)
3212 {
3213 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3214 const struct rtw89_pci_info *info = rtwdev->pci_info;
3215 struct rtw89_pci_tx_ring *tx_ring;
3216 int i;
3217
3218 for (i = 0; i < RTW89_TXCH_NUM; i++) {
3219 if (info->tx_dma_ch_mask & BIT(i))
3220 continue;
3221 tx_ring = &rtwpci->tx_rings[i];
3222 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3223 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3224 }
3225 }
3226
3227 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
3228 struct pci_dev *pdev,
3229 struct rtw89_pci_rx_ring *rx_ring)
3230 {
3231 struct rtw89_pci_rx_info *rx_info;
3232 struct sk_buff *skb;
3233 dma_addr_t dma;
3234 u32 buf_sz;
3235 u8 *head;
3236 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
3237 int i;
3238
3239 buf_sz = rx_ring->buf_sz;
3240 for (i = 0; i < rx_ring->bd_ring.len; i++) {
3241 skb = rx_ring->buf[i];
3242 if (!skb)
3243 continue;
3244
3245 rx_info = RTW89_PCI_RX_SKB_CB(skb);
3246 dma = rx_info->dma;
3247 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3248 dev_kfree_skb(skb);
3249 rx_ring->buf[i] = NULL;
3250 }
3251
3252 head = rx_ring->bd_ring.head;
3253 dma = rx_ring->bd_ring.dma;
3254 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3255
3256 rx_ring->bd_ring.head = NULL;
3257 }
3258
3259 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
3260 struct pci_dev *pdev)
3261 {
3262 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3263 struct rtw89_pci_rx_ring *rx_ring;
3264 int i;
3265
3266 for (i = 0; i < RTW89_RXCH_NUM; i++) {
3267 rx_ring = &rtwpci->rx_rings[i];
3268 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3269 }
3270 }
3271
3272 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
3273 struct pci_dev *pdev)
3274 {
3275 rtw89_pci_free_rx_rings(rtwdev, pdev);
3276 rtw89_pci_free_tx_rings(rtwdev, pdev);
3277 }
3278
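/* Map one RX skb for DMA and describe it in its RX buffer descriptor:
 * buffer size, the low 32 address bits, and the upper bits in the
 * "opt" field for configurations addressing beyond 32 bits.
 */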
3279 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
3280 struct rtw89_pci_rx_ring *rx_ring,
3281 struct sk_buff *skb, int buf_sz, u32 idx)
3282 {
3283 struct rtw89_pci_rx_info *rx_info;
3284 struct rtw89_pci_rx_bd_32 *rx_bd;
3285 dma_addr_t dma;
3286
3287 if (!skb)
3288 return -EINVAL;
3289
3290 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
3291 if (dma_mapping_error(&pdev->dev, dma))
3292 return -EBUSY;
3293
3294 rx_info = RTW89_PCI_RX_SKB_CB(skb);
3295 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
3296
3297 memset(rx_bd, 0, sizeof(*rx_bd));
3298 rx_bd->buf_size = cpu_to_le16(buf_sz);
3299 rx_bd->dma = cpu_to_le32(dma);
3300 rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI);
3301 rx_info->dma = dma;
3302
3303 return 0;
3304 }
3305
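/* Allocate the TX WD (WiFi descriptor) ring: one coherent DMA block
 * carved into fixed-size pages, each tracked by a rtw89_pci_tx_wd that
 * starts out on the free list. The FWCMD channel (CH12) is skipped
 * because H2C commands do not use WD pages.
 */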
3306 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
3307 struct pci_dev *pdev,
3308 struct rtw89_pci_tx_ring *tx_ring,
3309 enum rtw89_tx_channel txch)
3310 {
3311 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3312 struct rtw89_pci_tx_wd *txwd;
3313 dma_addr_t dma;
3314 dma_addr_t cur_paddr;
3315 u8 *head;
3316 u8 *cur_vaddr;
3317 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
3318 u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
3319 u32 ring_sz = page_size * page_num;
3320 u32 page_offset;
3321 int i;
3322
3323 /* FWCMD queue doesn't use txwd as pages */
3324 if (txch == RTW89_TXCH_CH12)
3325 return 0;
3326
3327 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3328 if (!head)
3329 return -ENOMEM;
3330
3331 INIT_LIST_HEAD(&wd_ring->free_pages);
3332 wd_ring->head = head;
3333 wd_ring->dma = dma;
3334 wd_ring->page_size = page_size;
3335 wd_ring->page_num = page_num;
3336
3337 page_offset = 0;
3338 for (i = 0; i < page_num; i++) {
3339 txwd = &wd_ring->pages[i];
3340 cur_paddr = dma + page_offset;
3341 cur_vaddr = head + page_offset;
3342
3343 skb_queue_head_init(&txwd->queue);
3344 INIT_LIST_HEAD(&txwd->list);
3345 txwd->paddr = cur_paddr;
3346 txwd->vaddr = cur_vaddr;
3347 txwd->len = page_size;
3348 txwd->seq = i;
3349 rtw89_pci_enqueue_txwd(tx_ring, txwd);
3350
3351 page_offset += page_size;
3352 }
3353
3354 return 0;
3355 }
3356
3357 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
3358 struct pci_dev *pdev,
3359 struct rtw89_pci_tx_ring *tx_ring,
3360 u32 desc_size, u32 len,
3361 enum rtw89_tx_channel txch)
3362 {
3363 const struct rtw89_pci_ch_dma_addr *txch_addr;
3364 int ring_sz = desc_size * len;
3365 u8 *head;
3366 dma_addr_t dma;
3367 int ret;
3368
3369 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
3370 if (ret) {
3371 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
3372 goto err;
3373 }
3374
3375 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
3376 if (ret) {
3377 rtw89_err(rtwdev, "failed to get address of txch %d\n", txch);
3378 goto err_free_wd_ring;
3379 }
3380
3381 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3382 if (!head) {
3383 ret = -ENOMEM;
3384 goto err_free_wd_ring;
3385 }
3386
3387 INIT_LIST_HEAD(&tx_ring->busy_pages);
3388 tx_ring->bd_ring.head = head;
3389 tx_ring->bd_ring.dma = dma;
3390 tx_ring->bd_ring.len = len;
3391 tx_ring->bd_ring.desc_size = desc_size;
3392 tx_ring->bd_ring.addr = *txch_addr;
3393 tx_ring->bd_ring.wp = 0;
3394 tx_ring->bd_ring.rp = 0;
3395 tx_ring->txch = txch;
3396
3397 return 0;
3398
3399 err_free_wd_ring:
3400 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3401 err:
3402 return ret;
3403 }
3404
3405 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
3406 struct pci_dev *pdev)
3407 {
3408 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3409 const struct rtw89_pci_info *info = rtwdev->pci_info;
3410 struct rtw89_pci_tx_ring *tx_ring;
3411 u32 desc_size;
3412 u32 len;
3413 u32 i, tx_allocated;
3414 int ret;
3415
3416 for (i = 0; i < RTW89_TXCH_NUM; i++) {
3417 if (info->tx_dma_ch_mask & BIT(i))
3418 continue;
3419 tx_ring = &rtwpci->tx_rings[i];
3420 desc_size = sizeof(struct rtw89_pci_tx_bd_32);
3421 len = RTW89_PCI_TXBD_NUM_MAX;
3422 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
3423 desc_size, len, i);
3424 if (ret) {
3425 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
3426 goto err_free;
3427 }
3428 }
3429
3430 return 0;
3431
3432 err_free:
3433 tx_allocated = i;
3434 for (i = 0; i < tx_allocated; i++) {
3435 tx_ring = &rtwpci->tx_rings[i];
3436 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3437 }
3438
3439 return ret;
3440 }
3441
3442 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
3443 struct pci_dev *pdev,
3444 struct rtw89_pci_rx_ring *rx_ring,
3445 u32 desc_size, u32 len, u32 rxch)
3446 {
3447 const struct rtw89_pci_info *info = rtwdev->pci_info;
3448 const struct rtw89_pci_ch_dma_addr *rxch_addr;
3449 struct sk_buff *skb;
3450 u8 *head;
3451 dma_addr_t dma;
3452 int ring_sz = desc_size * len;
3453 int buf_sz = RTW89_PCI_RX_BUF_SIZE;
3454 int i, allocated;
3455 int ret;
3456
3457 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
3458 if (ret) {
3459 rtw89_err(rtwdev, "failed to get address of rxch %d\n", rxch);
3460 return ret;
3461 }
3462
3463 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3464 if (!head) {
3465 ret = -ENOMEM;
3466 goto err;
3467 }
3468
3469 rx_ring->bd_ring.head = head;
3470 rx_ring->bd_ring.dma = dma;
3471 rx_ring->bd_ring.len = len;
3472 rx_ring->bd_ring.desc_size = desc_size;
3473 rx_ring->bd_ring.addr = *rxch_addr;
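/* On chips where rp == wp denotes a full ring, the write pointer starts
 * one slot behind the read pointer rather than both starting at 0.
 */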
3474 if (info->rx_ring_eq_is_full)
3475 rx_ring->bd_ring.wp = len - 1;
3476 else
3477 rx_ring->bd_ring.wp = 0;
3478 rx_ring->bd_ring.rp = 0;
3479 rx_ring->buf_sz = buf_sz;
3480 rx_ring->diliver_skb = NULL;
3481 rx_ring->diliver_desc.ready = false;
3482 rx_ring->target_rx_tag = 0;
3483
3484 for (i = 0; i < len; i++) {
3485 skb = dev_alloc_skb(buf_sz);
3486 if (!skb) {
3487 ret = -ENOMEM;
3488 goto err_free;
3489 }
3490
3491 memset(skb->data, 0, buf_sz);
3492 rx_ring->buf[i] = skb;
3493 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
3494 buf_sz, i);
3495 if (ret) {
3496 rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
3497 dev_kfree_skb_any(skb);
3498 rx_ring->buf[i] = NULL;
3499 goto err_free;
3500 }
3501 }
3502
3503 return 0;
3504
3505 err_free:
3506 allocated = i;
3507 for (i = 0; i < allocated; i++) {
3508 skb = rx_ring->buf[i];
3509 if (!skb)
3510 continue;
3511 dma = *((dma_addr_t *)skb->cb);
3512 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3513 dev_kfree_skb(skb);
3514 rx_ring->buf[i] = NULL;
3515 }
3516
3517 head = rx_ring->bd_ring.head;
3518 dma = rx_ring->bd_ring.dma;
3519 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3520
3521 rx_ring->bd_ring.head = NULL;
3522 err:
3523 return ret;
3524 }
3525
3526 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
3527 struct pci_dev *pdev)
3528 {
3529 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3530 struct rtw89_pci_rx_ring *rx_ring;
3531 u32 desc_size;
3532 u32 len;
3533 int i, rx_allocated;
3534 int ret;
3535
3536 for (i = 0; i < RTW89_RXCH_NUM; i++) {
3537 rx_ring = &rtwpci->rx_rings[i];
3538 desc_size = sizeof(struct rtw89_pci_rx_bd_32);
3539 len = RTW89_PCI_RXBD_NUM_MAX;
3540 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
3541 desc_size, len, i);
3542 if (ret) {
3543 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
3544 goto err_free;
3545 }
3546 }
3547
3548 return 0;
3549
3550 err_free:
3551 rx_allocated = i;
3552 for (i = 0; i < rx_allocated; i++) {
3553 rx_ring = &rtwpci->rx_rings[i];
3554 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3555 }
3556
3557 return ret;
3558 }
3559
3560 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
3561 struct pci_dev *pdev)
3562 {
3563 int ret;
3564
3565 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
3566 if (ret) {
3567 rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
3568 goto err;
3569 }
3570
3571 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
3572 if (ret) {
3573 rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
3574 goto err_free_tx_rings;
3575 }
3576
3577 return 0;
3578
3579 err_free_tx_rings:
3580 rtw89_pci_free_tx_rings(rtwdev, pdev);
3581 err:
3582 return ret;
3583 }
3584
3585 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
3586 struct rtw89_pci *rtwpci)
3587 {
3588 skb_queue_head_init(&rtwpci->h2c_queue);
3589 skb_queue_head_init(&rtwpci->h2c_release_queue);
3590 }
3591
3592 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
3593 struct pci_dev *pdev)
3594 {
3595 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3596 int ret;
3597
3598 ret = rtw89_pci_setup_mapping(rtwdev, pdev);
3599 if (ret) {
3600 rtw89_err(rtwdev, "failed to setup pci mapping\n");
3601 goto err;
3602 }
3603
3604 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
3605 if (ret) {
3606 rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
3607 goto err_pci_unmap;
3608 }
3609
3610 rtw89_pci_h2c_init(rtwdev, rtwpci);
3611
3612 spin_lock_init(&rtwpci->irq_lock);
3613 spin_lock_init(&rtwpci->trx_lock);
3614
3615 return 0;
3616
3617 err_pci_unmap:
3618 rtw89_pci_clear_mapping(rtwdev, pdev);
3619 err:
3620 return ret;
3621 }
3622
3623 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
3624 struct pci_dev *pdev)
3625 {
3626 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3627
3628 rtw89_pci_free_trx_rings(rtwdev, pdev);
3629 rtw89_pci_clear_mapping(rtwdev, pdev);
3630 rtw89_pci_release_fwcmd(rtwdev, rtwpci,
3631 skb_queue_len(&rtwpci->h2c_queue), true);
3632 }
3633
3634 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
3635 {
3636 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3637 const struct rtw89_chip_info *chip = rtwdev->chip;
3638 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN;
3639
3640 if (chip->chip_id == RTL8851B)
3641 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND;
3642
3643 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
3644
3645 if (rtwpci->under_recovery) {
3646 rtwpci->intrs[0] = hs0isr_ind_int_en;
3647 rtwpci->intrs[1] = 0;
3648 } else {
3649 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3650 B_AX_RXDMA_INT_EN |
3651 B_AX_RXP1DMA_INT_EN |
3652 B_AX_RPQDMA_INT_EN |
3653 B_AX_RXDMA_STUCK_INT_EN |
3654 B_AX_RDU_INT_EN |
3655 B_AX_RPQBD_FULL_INT_EN |
3656 hs0isr_ind_int_en;
3657
3658 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
3659 }
3660 }
3661 EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
3662
3663 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
3664 {
3665 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3666
3667 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
3668 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3669 rtwpci->intrs[0] = 0;
3670 rtwpci->intrs[1] = 0;
3671 }
3672
3673 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
3674 {
3675 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3676
3677 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
3678 B_AX_HS1ISR_IND_INT_EN |
3679 B_AX_HS0ISR_IND_INT_EN;
3680 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3681 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3682 B_AX_RXDMA_INT_EN |
3683 B_AX_RXP1DMA_INT_EN |
3684 B_AX_RPQDMA_INT_EN |
3685 B_AX_RXDMA_STUCK_INT_EN |
3686 B_AX_RDU_INT_EN |
3687 B_AX_RPQBD_FULL_INT_EN;
3688 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3689 }
3690
3691 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
3692 {
3693 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3694
3695 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
3696 B_AX_HS0ISR_IND_INT_EN;
3697 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3698 rtwpci->intrs[0] = 0;
3699 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3700 }
3701
3702 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
3703 {
3704 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3705
3706 if (rtwpci->under_recovery)
3707 rtw89_pci_recovery_intr_mask_v1(rtwdev);
3708 else if (rtwpci->low_power)
3709 rtw89_pci_low_power_intr_mask_v1(rtwdev);
3710 else
3711 rtw89_pci_default_intr_mask_v1(rtwdev);
3712 }
3713 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
3714
3715 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
3716 {
3717 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3718
3719 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
3720 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3721 rtwpci->intrs[0] = 0;
3722 rtwpci->intrs[1] = 0;
3723 }
3724
3725 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
3726 {
3727 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3728
3729 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 |
3730 B_BE_HS0_IND_INT_EN0;
3731 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3732 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 |
3733 B_BE_RDU_CH0_INT_IMR_V1;
3734 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3735 B_BE_PCIE_RX_RPQ0_IMR0_V1;
3736 }
3737
3738 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev)
3739 {
3740 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3741
3742 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 |
3743 B_BE_HS1_IND_INT_EN0;
3744 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3745 rtwpci->intrs[0] = 0;
3746 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3747 B_BE_PCIE_RX_RPQ0_IMR0_V1;
3748 }
3749
3750 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
3751 {
3752 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3753
3754 if (rtwpci->under_recovery)
3755 rtw89_pci_recovery_intr_mask_v2(rtwdev);
3756 else if (rtwpci->low_power)
3757 rtw89_pci_low_power_intr_mask_v2(rtwdev);
3758 else
3759 rtw89_pci_default_intr_mask_v2(rtwdev);
3760 }
3761 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
3762
3763 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
3764 struct pci_dev *pdev)
3765 {
3766 unsigned long flags = 0;
3767 int ret;
3768
3769 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI;
3770 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
3771 if (ret < 0) {
3772 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
3773 goto err;
3774 }
3775
3776 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
3777 rtw89_pci_interrupt_handler,
3778 rtw89_pci_interrupt_threadfn,
3779 IRQF_SHARED, KBUILD_MODNAME, rtwdev);
3780 if (ret) {
3781 rtw89_err(rtwdev, "failed to request threaded irq\n");
3782 goto err_free_vector;
3783 }
3784
3785 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
3786
3787 return 0;
3788
3789 err_free_vector:
3790 pci_free_irq_vectors(pdev);
3791 err:
3792 return ret;
3793 }
3794
3795 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
3796 struct pci_dev *pdev)
3797 {
3798 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
3799 pci_free_irq_vectors(pdev);
3800 }
3801
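/* Standard Gray-code to binary conversion by XOR-folding successive
 * right shifts, e.g. Gray 0b110 -> 0b110 ^ 0b011 ^ 0b001 = 0b100 (4).
 */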
3802 static u16 gray_code_to_bin(u16 gray_code)
3803 {
3804 u16 binary = gray_code;
3805
3806 while (gray_code) {
3807 gray_code >>= 1;
3808 binary ^= gray_code;
3809 }
3810
3811 return binary;
3812 }
3813
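/* 8852C only: unless ASPM control is already set to L1, re-enable the
 * PHY power-save bit; on Gen2 links, first copy the equalizer "filter
 * out" value (kept Gray-coded in RAC_ANA1F) into RAC_ANA24.
 */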
3814 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
3815 {
3816 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3817 struct pci_dev *pdev = rtwpci->pdev;
3818 u16 val16, filter_out_val;
3819 u32 val, phy_offset;
3820 int ret;
3821
3822 if (rtwdev->chip->chip_id != RTL8852C)
3823 return 0;
3824
3825 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
3826 if (val == B_AX_ASPM_CTRL_L1)
3827 return 0;
3828
3829 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
3830 if (ret)
3831 return ret;
3832
3833 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
3834 if (val == RTW89_PCIE_GEN1_SPEED) {
3835 phy_offset = R_RAC_DIRECT_OFFSET_G1;
3836 } else if (val == RTW89_PCIE_GEN2_SPEED) {
3837 phy_offset = R_RAC_DIRECT_OFFSET_G2;
3838 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
3839 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
3840 val16 | B_PCIE_BIT_PINOUT_DIS);
3841 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
3842 val16 & ~B_PCIE_BIT_RD_SEL);
3843
3844 val16 = rtw89_read16_mask(rtwdev,
3845 phy_offset + RAC_ANA1F * RAC_MULT,
3846 FILTER_OUT_EQ_MASK);
3847 val16 = gray_code_to_bin(val16);
3848 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
3849 RAC_MULT);
3850 filter_out_val &= ~REG_FILTER_OUT_MASK;
3851 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
3852
3853 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
3854 filter_out_val);
3855 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
3856 B_BAC_EQ_SEL);
3857 rtw89_write16_set(rtwdev,
3858 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
3859 B_PCIE_BIT_PSAVE);
3860 } else {
3861 return -EOPNOTSUPP;
3862 }
3863 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
3864 B_PCIE_BIT_PSAVE);
3865
3866 return 0;
3867 }
3868
3869 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
3870 {
3871 const struct rtw89_pci_info *info = rtwdev->pci_info;
3872 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3873
3874 if (rtw89_pci_disable_clkreq)
3875 return;
3876
3877 gen_def->clkreq_set(rtwdev, enable);
3878 }
3879
3880 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
3881 {
3882 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3883 int ret;
3884
3885 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
3886 PCIE_CLKDLY_HW_30US);
3887 if (ret)
3888 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
3889
3890 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3891 if (enable)
3892 ret = rtw89_pci_config_byte_set(rtwdev,
3893 RTW89_PCIE_L1_CTRL,
3894 RTW89_PCIE_BIT_CLK);
3895 else
3896 ret = rtw89_pci_config_byte_clr(rtwdev,
3897 RTW89_PCIE_L1_CTRL,
3898 RTW89_PCIE_BIT_CLK);
3899 if (ret)
3900 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
3901 enable ? "set" : "unset", ret);
3902 } else if (chip_id == RTL8852C) {
3903 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
3904 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
3905 if (enable)
3906 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
3907 B_AX_CLK_REQ_N);
3908 else
3909 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
3910 B_AX_CLK_REQ_N);
3911 }
3912 }
3913
3914 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
3915 {
3916 const struct rtw89_pci_info *info = rtwdev->pci_info;
3917 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3918
3919 if (rtw89_pci_disable_aspm_l1)
3920 return;
3921
3922 gen_def->aspm_set(rtwdev, enable);
3923 }
3924
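/* Program the vendor ASPM control byte with the L1/L0s entry delays
 * (16 us / 4 us), then enable or disable ASPM L1 depending on the chip.
 */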
3925 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
3926 {
3927 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3928 u8 value = 0;
3929 int ret;
3930
3931 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
3932 if (ret)
3933 rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
3934
3935 u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
3936 u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
3937
3938 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
3939 if (ret)
3940 rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
3941
3942 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3943 if (enable)
3944 ret = rtw89_pci_config_byte_set(rtwdev,
3945 RTW89_PCIE_L1_CTRL,
3946 RTW89_PCIE_BIT_L1);
3947 else
3948 ret = rtw89_pci_config_byte_clr(rtwdev,
3949 RTW89_PCIE_L1_CTRL,
3950 RTW89_PCIE_BIT_L1);
3951 } else if (chip_id == RTL8852C) {
3952 if (enable)
3953 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3954 B_AX_ASPM_CTRL_L1);
3955 else
3956 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3957 B_AX_ASPM_CTRL_L1);
3958 }
3959 if (ret)
3960 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
3961 enable ? "set" : "unset", ret);
3962 }
3963
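/* Interrupt mitigation: batch RX interrupts only under high TX/RX
 * traffic and never while scanning. For AX chips the RX counter match
 * is set to RTW89_PCI_RXBD_NUM_MAX / 2 buffers and the RX timer to
 * 2048 us (32 units of 64 us); otherwise the register is cleared.
 */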
3964 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
3965 {
3966 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
3967 const struct rtw89_pci_info *info = rtwdev->pci_info;
3968 struct rtw89_traffic_stats *stats = &rtwdev->stats;
3969 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
3970 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
3971 u32 val = 0;
3972
3973 if (rtwdev->scanning ||
3974 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
3975 goto out;
3976
3977 if (chip_gen == RTW89_CHIP_BE)
3978 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
3979 else
3980 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
3981 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
3982 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
3983 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
3984
3985 out:
3986 rtw89_write32(rtwdev, info->mit_addr, val);
3987 }
3988
3989 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
3990 {
3991 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3992 struct pci_dev *pdev = rtwpci->pdev;
3993 u16 link_ctrl;
3994 int ret;
3995
3996 /* Although the standard PCIE configuration space provides a link
3997 * control register, by Realtek's design the driver must also
3998 * check whether the host supports CLKREQ/ASPM before enabling
3999 * the HW module.
4000 *
4001 * These functions are implemented by two associated HW modules:
4002 * one accesses the PCIE configuration space to follow the host
4003 * settings, and the other carries out the CLKREQ/ASPM mechanisms
4004 * and is disabled by default, because the host may not support
4005 * them, and wrong settings (ex. CLKREQ# not Bi-Direction) could
4006 * lead to losing the device if the HW misbehaves on the link.
4007 *
4008 * Hence the driver first checks that the PCIE configuration
4009 * space is sync'ed and enabled, and only then turns on the other
4010 * module that actually implements the mechanism.
4011 */
4012 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
4013 if (ret) {
4014 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
4015 return;
4016 }
4017
4018 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
4019 rtw89_pci_clkreq_set(rtwdev, true);
4020
4021 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
4022 rtw89_pci_aspm_set(rtwdev, true);
4023 }
4024
4025 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
4026 {
4027 const struct rtw89_pci_info *info = rtwdev->pci_info;
4028 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4029
4030 if (rtw89_pci_disable_l1ss)
4031 return;
4032
4033 gen_def->l1ss_set(rtwdev, enable);
4034 }
4035
4036 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
4037 {
4038 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4039 int ret;
4040
4041 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4042 if (enable)
4043 ret = rtw89_pci_config_byte_set(rtwdev,
4044 RTW89_PCIE_TIMER_CTRL,
4045 RTW89_PCIE_BIT_L1SUB);
4046 else
4047 ret = rtw89_pci_config_byte_clr(rtwdev,
4048 RTW89_PCIE_TIMER_CTRL,
4049 RTW89_PCIE_BIT_L1SUB);
4050 if (ret)
4051 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
4052 enable ? "set" : "unset", ret);
4053 } else if (chip_id == RTL8852C) {
4054 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
4055 RTW89_PCIE_BIT_ASPM_L11 |
4056 RTW89_PCIE_BIT_PCI_L11);
4057 if (ret)
4058 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
4059 if (enable)
4060 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4061 B_AX_L1SUB_DISABLE);
4062 else
4063 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4064 B_AX_L1SUB_DISABLE);
4065 }
4066 }
4067
4068 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
4069 {
4070 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4071 struct pci_dev *pdev = rtwpci->pdev;
4072 u32 l1ss_cap_ptr, l1ss_ctrl;
4073
4074 if (rtw89_pci_disable_l1ss)
4075 return;
4076
4077 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
4078 if (!l1ss_cap_ptr)
4079 return;
4080
4081 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
4082
4083 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
4084 rtw89_pci_l1ss_set(rtwdev, true);
4085 }
4086
4087 static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev)
4088 {
4089 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4090 struct pci_dev *pdev = rtwpci->pdev;
4091
4092 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
4093 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
4094 }
4095
4096 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
4097 {
4098 int ret = 0;
4099 u32 sts;
4100 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
4101
4102 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
4103 10, 1000, false, rtwdev,
4104 R_AX_PCIE_DMA_BUSY1);
4105 if (ret) {
4106 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
4107 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
4108 return -EINVAL;
4109 }
4110 return ret;
4111 }
4112
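/* Level-1 reset, stop phase: disable all PCIe DMA and poll for idle.
 * If the debug flags show TX or RX stuck, bounce the corresponding HCI
 * DMA path and poll once more before reporting failure.
 */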
4113 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
4114 {
4115 u32 val;
4116 int ret;
4117
4118 if (rtwdev->chip->chip_id == RTL8852C)
4119 return 0;
4120
4121 rtw89_pci_ctrl_dma_all(rtwdev, false);
4122 ret = rtw89_pci_poll_io_idle_ax(rtwdev);
4123 if (ret) {
4124 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4125 rtw89_debug(rtwdev, RTW89_DBG_HCI,
4126 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
4127 R_AX_DBG_ERR_FLAG, val);
4128 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
4129 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
4130 if (val & B_AX_RX_STUCK)
4131 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
4132 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
4133 ret = rtw89_pci_poll_io_idle_ax(rtwdev);
4134 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4135 rtw89_debug(rtwdev, RTW89_DBG_HCI,
4136 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
4137 R_AX_DBG_ERR_FLAG, val);
4138 }
4139
4140 return ret;
4141 }
4142
4143 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
4144 {
4145 int ret;
4146
4147 if (rtwdev->chip->chip_id == RTL8852C)
4148 return 0;
4149
4150 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
4151 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
4152 rtw89_pci_clr_idx_all(rtwdev);
4153
4154 ret = rtw89_pci_rst_bdram_ax(rtwdev);
4155 if (ret)
4156 return ret;
4157
4158 rtw89_pci_ctrl_dma_all(rtwdev, true);
4159 return ret;
4160 }
4161
4162 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
4163 enum rtw89_lv1_rcvy_step step)
4164 {
4165 const struct rtw89_pci_info *info = rtwdev->pci_info;
4166 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4167 int ret;
4168
4169 switch (step) {
4170 case RTW89_LV1_RCVY_STEP_1:
4171 ret = gen_def->lv1rst_stop_dma(rtwdev);
4172 if (ret)
4173 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
4174
4175 break;
4176
4177 case RTW89_LV1_RCVY_STEP_2:
4178 ret = gen_def->lv1rst_start_dma(rtwdev);
4179 if (ret)
4180 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
4181 break;
4182
4183 default:
4184 return -EINVAL;
4185 }
4186
4187 return ret;
4188 }
4189
4190 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
4191 {
4192 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
4193 return;
4194
4195 if (rtwdev->chip->chip_id == RTL8852C) {
4196 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4197 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
4198 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4199 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
4200 } else {
4201 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
4202 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
4203 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4204 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
4205 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4206 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
4207 }
4208 }
4209
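/* NAPI poll: clear the RPQ interrupt status before draining release
 * reports, then likewise for the RX queue, so events arriving mid-poll
 * re-assert the interrupt. Interrupts are re-enabled only when the
 * budget was not exhausted and NAPI completed.
 */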
4210 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
4211 {
4212 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
4213 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4214 const struct rtw89_pci_info *info = rtwdev->pci_info;
4215 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4216 unsigned long flags;
4217 int work_done;
4218
4219 rtwdev->napi_budget_countdown = budget;
4220
4221 rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
4222 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4223 if (work_done == budget)
4224 return budget;
4225
4226 rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
4227 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4228 if (work_done < budget && napi_complete_done(napi, work_done)) {
4229 spin_lock_irqsave(&rtwpci->irq_lock, flags);
4230 if (likely(rtwpci->running))
4231 rtw89_chip_enable_intr(rtwdev, rtwpci);
4232 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
4233 }
4234
4235 return work_done;
4236 }
4237
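/* Scan a zero-sentinel-terminated table of subsystem-ID quirks; on an
 * exact vendor/device/SVID/SDID match, OR the entry's bitmap into the
 * device quirks and adopt its customer ID. The loop bound of 200 is
 * presumably a guard against a table missing its terminator.
 */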
4238 static
4239 void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev,
4240 struct pci_dev *pdev,
4241 const struct rtw89_pci_ssid_quirk *ssid_quirks)
4242 {
4243 int i;
4244
4245 if (!ssid_quirks)
4246 return;
4247
4248 for (i = 0; i < 200; i++, ssid_quirks++) {
4249 if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0)
4250 break;
4251
4252 if (ssid_quirks->vendor != pdev->vendor ||
4253 ssid_quirks->device != pdev->device ||
4254 ssid_quirks->subsystem_vendor != pdev->subsystem_vendor ||
4255 ssid_quirks->subsystem_device != pdev->subsystem_device)
4256 continue;
4257
4258 bitmap_or(rtwdev->quirks, rtwdev->quirks, &ssid_quirks->bitmap,
4259 NUM_OF_RTW89_QUIRKS);
4260 rtwdev->custid = ssid_quirks->custid;
4261 break;
4262 }
4263
4264 rtw89_debug(rtwdev, RTW89_DBG_HCI, "quirks=%*ph custid=%d\n",
4265 (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid);
4266 }
4267
4268 static int __maybe_unused rtw89_pci_suspend(struct device *dev)
4269 {
4270 struct ieee80211_hw *hw = dev_get_drvdata(dev);
4271 struct rtw89_dev *rtwdev = hw->priv;
4272 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4273
4274 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4275 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4276 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4277 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4278 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
4279 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4280 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
4281 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4282 } else {
4283 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4284 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4285 }
4286
4287 return 0;
4288 }
4289
4290 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
4291 {
4292 if (rtwdev->chip->chip_id == RTL8852C)
4293 return;
4294
4295 /* Hardware needs the register written twice for the setting to take effect */
4296 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4297 RTW89_PCIE_BIT_CFG_RST_MSTATE);
4298 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4299 RTW89_PCIE_BIT_CFG_RST_MSTATE);
4300 }
4301
4302 void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
4303 {
4304 if (resume)
4305 rtw89_pci_cfg_dac(rtwdev);
4306
4307 rtw89_pci_disable_eq(rtwdev);
4308 rtw89_pci_filter_out(rtwdev);
4309 rtw89_pci_cpl_timeout_cfg(rtwdev);
4310 rtw89_pci_link_cfg(rtwdev);
4311 rtw89_pci_l1ss_cfg(rtwdev);
4312 }
4313
4314 static int __maybe_unused rtw89_pci_resume(struct device *dev)
4315 {
4316 struct ieee80211_hw *hw = dev_get_drvdata(dev);
4317 struct rtw89_dev *rtwdev = hw->priv;
4318 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4319
4320 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4321 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4322 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4323 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4324 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
4325 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4326 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
4327 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4328 } else {
4329 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4330 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4331 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4332 B_AX_SEL_REQ_ENTR_L1);
4333 }
4334 rtw89_pci_l2_hci_ldo(rtwdev);
4335
4336 rtw89_pci_basic_cfg(rtwdev, true);
4337
4338 return 0;
4339 }
4340
4341 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
4342 EXPORT_SYMBOL(rtw89_pm_ops);
4343
4344 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
4345 .isr_rdu = B_AX_RDU_INT,
4346 .isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
4347 .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
4348 .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
4349 .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
4350 B_AX_RDU_INT},
4351
4352 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
4353 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
4354 .mac_post_init = rtw89_pci_ops_mac_post_init_ax,
4355
4356 .clr_idx_all = rtw89_pci_clr_idx_all_ax,
4357 .rst_bdram = rtw89_pci_rst_bdram_ax,
4358
4359 .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
4360 .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
4361
4362 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
4363 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
4364 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
4365
4366 .aspm_set = rtw89_pci_aspm_set_ax,
4367 .clkreq_set = rtw89_pci_clkreq_set_ax,
4368 .l1ss_set = rtw89_pci_l1ss_set_ax,
4369
4370 .disable_eq = rtw89_pci_disable_eq_ax,
4371 .power_wake = rtw89_pci_power_wake_ax,
4372 };
4373 EXPORT_SYMBOL(rtw89_pci_gen_ax);
4374
4375 static const struct rtw89_hci_ops rtw89_pci_ops = {
4376 .tx_write = rtw89_pci_ops_tx_write,
4377 .tx_kick_off = rtw89_pci_ops_tx_kick_off,
4378 .flush_queues = rtw89_pci_ops_flush_queues,
4379 .reset = rtw89_pci_ops_reset,
4380 .start = rtw89_pci_ops_start,
4381 .stop = rtw89_pci_ops_stop,
4382 .pause = rtw89_pci_ops_pause,
4383 .switch_mode = rtw89_pci_ops_switch_mode,
4384 .recalc_int_mit = rtw89_pci_recalc_int_mit,
4385
4386 .read8 = rtw89_pci_ops_read8,
4387 .read16 = rtw89_pci_ops_read16,
4388 .read32 = rtw89_pci_ops_read32,
4389 .write8 = rtw89_pci_ops_write8,
4390 .write16 = rtw89_pci_ops_write16,
4391 .write32 = rtw89_pci_ops_write32,
4392
4393 .mac_pre_init = rtw89_pci_ops_mac_pre_init,
4394 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit,
4395 .mac_post_init = rtw89_pci_ops_mac_post_init,
4396 .deinit = rtw89_pci_ops_deinit,
4397
4398 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
4399 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
4400 .dump_err_status = rtw89_pci_ops_dump_err_status,
4401 .napi_poll = rtw89_pci_napi_poll,
4402
4403 .recovery_start = rtw89_pci_ops_recovery_start,
4404 .recovery_complete = rtw89_pci_ops_recovery_complete,
4405
4406 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch,
4407 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
4408 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
4409 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,
4410
4411 .clr_idx_all = rtw89_pci_clr_idx_all,
4412 .clear = rtw89_pci_clear_resource,
4413 .disable_intr = rtw89_pci_disable_intr_lock,
4414 .enable_intr = rtw89_pci_enable_intr_lock,
4415 .rst_bdram = rtw89_pci_reset_bdram,
4416 };
4417
4418 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4419 {
4420 struct rtw89_dev *rtwdev;
4421 const struct rtw89_driver_info *info;
4422 const struct rtw89_pci_info *pci_info;
4423 int ret;
4424
4425 info = (const struct rtw89_driver_info *)id->driver_data;
4426
4427 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
4428 sizeof(struct rtw89_pci),
4429 info->chip, info->variant);
4430 if (!rtwdev) {
4431 dev_err(&pdev->dev, "failed to allocate hw\n");
4432 return -ENOMEM;
4433 }
4434
4435 pci_info = info->bus.pci;
4436
4437 rtwdev->pci_info = info->bus.pci;
4438 rtwdev->hci.ops = &rtw89_pci_ops;
4439 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
4440 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
4441 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
4442
4443 rtw89_check_quirks(rtwdev, info->quirks);
4444 rtw89_check_pci_ssid_quirks(rtwdev, pdev, pci_info->ssid_quirks);
4445
4446 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
4447
4448 ret = rtw89_core_init(rtwdev);
4449 if (ret) {
4450 rtw89_err(rtwdev, "failed to initialise core\n");
4451 goto err_release_hw;
4452 }
4453
4454 ret = rtw89_pci_claim_device(rtwdev, pdev);
4455 if (ret) {
4456 rtw89_err(rtwdev, "failed to claim pci device\n");
4457 goto err_core_deinit;
4458 }
4459
4460 ret = rtw89_pci_setup_resource(rtwdev, pdev);
4461 if (ret) {
4462 rtw89_err(rtwdev, "failed to setup pci resource\n");
4463 goto err_declaim_pci;
4464 }
4465
4466 ret = rtw89_chip_info_setup(rtwdev);
4467 if (ret) {
4468 rtw89_err(rtwdev, "failed to setup chip information\n");
4469 goto err_clear_resource;
4470 }
4471
4472 rtw89_pci_basic_cfg(rtwdev, false);
4473
4474 ret = rtw89_core_napi_init(rtwdev);
4475 if (ret) {
4476 rtw89_err(rtwdev, "failed to init napi\n");
4477 goto err_clear_resource;
4478 }
4479
4480 ret = rtw89_pci_request_irq(rtwdev, pdev);
4481 if (ret) {
4482 rtw89_err(rtwdev, "failed to request pci irq\n");
4483 goto err_deinit_napi;
4484 }
4485
4486 ret = rtw89_core_register(rtwdev);
4487 if (ret) {
4488 rtw89_err(rtwdev, "failed to register core\n");
4489 goto err_free_irq;
4490 }
4491
4492 set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
4493
4494 return 0;
4495
4496 err_free_irq:
4497 rtw89_pci_free_irq(rtwdev, pdev);
4498 err_deinit_napi:
4499 rtw89_core_napi_deinit(rtwdev);
4500 err_clear_resource:
4501 rtw89_pci_clear_resource(rtwdev, pdev);
4502 err_declaim_pci:
4503 rtw89_pci_declaim_device(rtwdev, pdev);
4504 err_core_deinit:
4505 rtw89_core_deinit(rtwdev);
4506 err_release_hw:
4507 rtw89_free_ieee80211_hw(rtwdev);
4508
4509 return ret;
4510 }
4511 EXPORT_SYMBOL(rtw89_pci_probe);
4512
4513 void rtw89_pci_remove(struct pci_dev *pdev)
4514 {
4515 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4516 struct rtw89_dev *rtwdev;
4517
4518 rtwdev = hw->priv;
4519
4520 rtw89_pci_free_irq(rtwdev, pdev);
4521 rtw89_core_napi_deinit(rtwdev);
4522 rtw89_core_unregister(rtwdev);
4523 rtw89_pci_clear_resource(rtwdev, pdev);
4524 rtw89_pci_declaim_device(rtwdev, pdev);
4525 rtw89_core_deinit(rtwdev);
4526 rtw89_free_ieee80211_hw(rtwdev);
4527 }
4528 EXPORT_SYMBOL(rtw89_pci_remove);
4529
4530 MODULE_AUTHOR("Realtek Corporation");
4531 MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
4532 MODULE_LICENSE("Dual BSD/GPL");
4533