Lines matching "exported", "sram" (iwlwifi PCIe transport internal header)

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "iwl-context-info.h"
 * @invalid: rxb is in driver ownership - not owned by HW
 * struct iwl_rx_transfer_desc - transfer descriptor
 * struct iwl_rx_completion_desc - completion descriptor
 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
 * struct iwl_rxq - Rx queue
 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000-series
 * devices and later.
 * @free_count: Number of pre-allocated buffers in rx_free
 * @lock: per-queue lock
 * @queue: actual rx queue. Not used when multiple RX queues are in use.
 * struct iwl_rb_allocator - Rx allocator
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return le16_to_cpu(READ_ONCE(*rb_stts));
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
	}
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * enum iwl_shared_irq_flags - level of sharing for irq
 * enum iwl_image_response_code - image response values
 * enum iwl_pcie_imr_status - imr dma transfer state
 * struct iwl_pcie_txqs - TX queues data
 * @page_offs: offset from skb->cb to mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
 * struct iwl_trans_pcie - PCIe transport specific data
 * @scd_base_addr: scheduler base address in SRAM
 * @pci_dev: PCI device handle
 * @cmd_queue: command queue number
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if MSI-X was enabled successfully
 * @inta_mask: interrupt (INT-A) mask
 * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
 *	enabled/disabled
/* in IWL_TRANS_GET_PCIE_TRANS() */
	return (void *)trans->trans_specific;
/* in iwl_pcie_clear_irq():
 * re-enabled by clearing this bit. This register is defined as write 1
 * clear (W1C), i.e. the bit is cleared by writing 1 to it.
 */
 * ICT - interrupt handling
#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
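A note on the "- 3" above (an inference from the TX path, not spelled out in
this listing): two TBs are typically reserved for the TX command and header,
and one more may be needed for data in the SKB's linear head, leaving
max_tbs - 3 TBs for mapping page fragments.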
 * that no TB referencing this page can trigger the 32-bit boundary hardware
 * bug

#define IWL_TSO_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(struct iwl_tso_page_info))
/* in iwl_pcie_get_tso_page_phys() */
	res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
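The line above shows only the page-info lookup; a minimal sketch of the
complete helper follows, assuming (as its use suggests) that
IWL_TSO_PAGE_INFO() maps a buffer address to the iwl_tso_page_info struct
stored at the end of that buffer's page:

static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
{
	dma_addr_t res;

	/* DMA address of the page start, cached in the page info ... */
	res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
	/* ... plus the offset of 'addr' within its page */
	res += (unsigned long)addr & ~PAGE_MASK;

	return res;
}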
/* in iwl_txq_get_first_tb_dma() */
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;

/* in iwl_txq_get_cmd_index() */
	return index & (q->n_window - 1);
/* in iwl_txq_get_tfd() */
	if (trans->trans_cfg->gen2)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
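The check this comment introduces can be sketched as follows (a sketch only;
the listing does not show the function body):

static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	/* if the upper 32 bits differ, [phys, phys + len] spans a 4G boundary */
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}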
/* in iwl_txq_stop() */
	if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
	return ++index &
	       (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
	return --index &
	       (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
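Both wrap helpers rely on max_tfd_queue_size being a power of two: ANDing the
pre-incremented or pre-decremented index with (size - 1) reduces it modulo the
queue size. For example, with a 256-entry queue, incrementing index 255 wraps
to 0 and decrementing index 0 wraps to 255.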
/* in iwl_trans_pcie_wake_queue() */
	if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
/* in iwl_txq_set_tfd_invalid_gen2() */
	tfd->num_tbs = 0;

	iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
			    trans->invalid_tx_cmd.size);
/* in iwl_txq_gen1_tfd_tb_get_len() */
	if (trans->trans_cfg->gen2) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return le16_to_cpu(tfh_tb->tb_len);
	}

	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
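A note on the ">> 4" above (bit layout inferred from the shift, not shown in
this listing): in the gen1 TB descriptor, the 16-bit hi_n_len field packs the
upper bits of the DMA address in its low 4 bits and the 12-bit buffer length
in bits 4..15, so shifting right by 4 extracts the length.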
/* in _iwl_disable_interrupts() */
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
/* in iwl_pcie_get_num_sections() */
	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}
/* in iwl_pcie_ctxt_info_free_fw_img() */
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
/* in iwl_disable_interrupts() */
	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
/* in _iwl_enable_interrupts() */
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
/* in iwl_enable_interrupts() */
	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
/* in iwl_enable_hw_int_msk_msix() */
	trans_pcie->hw_mask = msk;

/* in iwl_enable_fh_int_msk_msix() */
	trans_pcie->fh_mask = msk;
/* in iwl_enable_fw_load_int() */
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
/* in iwl_enable_fw_load_int_ctx_info() */
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/* leave the FH causes enabled to receive the ALIVE notification */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
/* in queue_name() */
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ":exception";
/* in iwl_enable_rfkill_int() */
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
	}
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when powering down the device we need to set it to let
		 * it wake up the PCI-E bus for RF-kill interrupts.
		 */
/* in iwl_is_rfkill_set() */
	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;
/* in iwl_pcie_dbg_on() */
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
/* transport gen 1 exported functions */

/* transport gen 2 exported functions */