Lines Matching +full:dfx +full:- +full:bus

1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/dma-mapping.h>
57 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
65 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
68 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
258 ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
272 ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
388 * struct qm_hw_err - Structure describing the device errors
479 return -EINVAL; in hisi_qm_q_num_set()
487 if (pdev->revision == QM_HW_V1) in hisi_qm_q_num_set()
497 return -EINVAL; in hisi_qm_q_num_set()
505 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_get_hw_error_status()
510 return qm->err_ini->get_dev_hw_err_status(qm); in qm_get_dev_err_status()
516 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in qm_check_dev_error()
519 if (pf_qm->fun_type == QM_HW_VF) in qm_check_dev_error()
523 if (err_status & pf_qm->err_info.qm_shutdown_mask) in qm_check_dev_error()
526 if (pf_qm->err_ini->dev_is_abnormal) in qm_check_dev_error()
527 return pf_qm->err_ini->dev_is_abnormal(pf_qm); in qm_check_dev_error()
537 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_wait_reset_finish()
540 return -EBUSY; in qm_wait_reset_finish()
548 struct pci_dev *pdev = qm->pdev; in qm_reset_prepare_ready()
555 if (qm->ver < QM_HW_V3) in qm_reset_prepare_ready()
563 struct pci_dev *pdev = qm->pdev; in qm_reset_bit_clear()
566 if (qm->ver < QM_HW_V3) in qm_reset_bit_clear()
567 clear_bit(QM_RESETTING, &pf_qm->misc_ctl); in qm_reset_bit_clear()
569 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_reset_bit_clear()
575 mailbox->w0 = cpu_to_le16((cmd) | in qm_mb_pre_init()
578 mailbox->queue_num = cpu_to_le16(queue); in qm_mb_pre_init()
579 mailbox->base_l = cpu_to_le32(lower_32_bits(base)); in qm_mb_pre_init()
580 mailbox->base_h = cpu_to_le32(upper_32_bits(base)); in qm_mb_pre_init()
581 mailbox->rsvd = 0; in qm_mb_pre_init()
584 /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
589 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, in hisi_qm_wait_mb_ready()
598 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; in qm_mb_write()
628 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); in qm_mb_nolock()
629 ret = -EBUSY; in qm_mb_nolock()
636 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); in qm_mb_nolock()
637 ret = -ETIMEDOUT; in qm_mb_nolock()
641 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); in qm_mb_nolock()
643 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); in qm_mb_nolock()
644 ret = -EIO; in qm_mb_nolock()
651 atomic64_inc(&qm->debug.dfx.mb_err_cnt); in qm_mb_nolock()
663 mutex_lock(&qm->mailbox_lock); in hisi_qm_mb()
665 mutex_unlock(&qm->mailbox_lock); in hisi_qm_mb()
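The hisi_qm_mb() fragments above show the exported mailbox entry point serialising on qm->mailbox_lock and reporting -EBUSY/-ETIMEDOUT/-EIO failures. A minimal usage sketch, assuming the hisi_qm_mb() signature from include/linux/hisi_acc_qm.h; the command and DMA handle mirror the QM_MB_CMD_SQC_BT call that appears further down in this listing, and the example_* name is illustrative:

/* Program the SQC base table through the mailbox, as __hisi_qm_start() does below. */
static int example_set_sqc_bt(struct hisi_qm *qm, dma_addr_t sqc_dma)
{
	/* hisi_qm_mb() takes qm->mailbox_lock and polls the hardware for completion */
	return hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, sqc_dma, 0, 0);
}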
683 tmp_xqc = qm->xqc_buf.sqc; in qm_set_and_get_xqc()
684 xqc_dma = qm->xqc_buf.sqc_dma; in qm_set_and_get_xqc()
688 tmp_xqc = qm->xqc_buf.cqc; in qm_set_and_get_xqc()
689 xqc_dma = qm->xqc_buf.cqc_dma; in qm_set_and_get_xqc()
693 tmp_xqc = qm->xqc_buf.eqc; in qm_set_and_get_xqc()
694 xqc_dma = qm->xqc_buf.eqc_dma; in qm_set_and_get_xqc()
698 tmp_xqc = qm->xqc_buf.aeqc; in qm_set_and_get_xqc()
699 xqc_dma = qm->xqc_buf.aeqc_dma; in qm_set_and_get_xqc()
702 dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd); in qm_set_and_get_xqc()
703 return -EINVAL; in qm_set_and_get_xqc()
708 dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n"); in qm_set_and_get_xqc()
709 return -EIO; in qm_set_and_get_xqc()
712 mutex_lock(&qm->mailbox_lock); in qm_set_and_get_xqc()
721 mutex_unlock(&qm->mailbox_lock); in qm_set_and_get_xqc()
734 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); in qm_db_v1()
739 void __iomem *io_base = qm->io_base; in qm_db_v2()
744 io_base = qm->db_io_base + (u64)qn * qm->db_interval + in qm_db_v2()
759 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", in qm_db()
762 qm->ops->qm_db(qm, qn, cmd, index, priority); in qm_db()
770 if (qm->ver < QM_HW_V3) in qm_disable_clock_gate()
773 val = readl(qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
775 writel(val, qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
782 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
783 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, in qm_dev_mem_reset()
789 * hisi_qm_get_hw_info() - Get device information.
803 switch (qm->ver) { in hisi_qm_get_hw_info()
812 val = readl(qm->io_base + info_table[index].offset); in hisi_qm_get_hw_info()
824 switch (qm->ver) { in hisi_qm_get_cap_value()
833 val = readl(qm->io_base + info_table[index].offset); in hisi_qm_get_cap_value()
844 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); in qm_get_xqc_depth()
852 struct device *dev = &qm->pdev->dev; in hisi_qm_set_algs()
856 if (!qm->uacce) in hisi_qm_set_algs()
862 return -EINVAL; in hisi_qm_set_algs()
867 return -ENOMEM; in hisi_qm_set_algs()
877 qm->uacce->algs = algs; in hisi_qm_set_algs()
885 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num()
886 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
888 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
893 struct device *dev = &qm->pdev->dev; in qm_pm_get_sync()
896 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_get_sync()
910 struct device *dev = &qm->pdev->dev; in qm_pm_put_sync()
912 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_put_sync()
921 if (qp->qp_status.cq_head == qp->cq_depth - 1) { in qm_cq_head_update()
922 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; in qm_cq_head_update()
923 qp->qp_status.cq_head = 0; in qm_cq_head_update()
925 qp->qp_status.cq_head++; in qm_cq_head_update()
931 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_req_cb()
932 struct hisi_qm *qm = qp->qm; in qm_poll_req_cb()
934 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in qm_poll_req_cb()
936 qp->req_cb(qp, qp->sqe + qm->sqe_size * in qm_poll_req_cb()
937 le16_to_cpu(cqe->sq_head)); in qm_poll_req_cb()
939 cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_req_cb()
940 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_req_cb()
941 qp->qp_status.cq_head, 0); in qm_poll_req_cb()
942 atomic_dec(&qp->qp_status.used); in qm_poll_req_cb()
948 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); in qm_poll_req_cb()
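qm_cq_head_update() and qm_poll_req_cb() above implement the usual phase-bit ring walk: a CQE is consumed while its phase bit matches the expected phase, and the expected phase flips each time the head wraps. A generic sketch of that convention; the names below are illustrative, not driver symbols:

struct demo_ring {
	unsigned int head;
	unsigned int depth;
	bool phase;			/* expected phase of the next valid entry */
};

static void demo_ring_advance(struct demo_ring *r)
{
	if (r->head == r->depth - 1) {
		r->phase = !r->phase;	/* wrap-around flips the expected phase */
		r->head = 0;
	} else {
		r->head++;
	}
}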
955 struct hisi_qm *qm = poll_data->qm; in qm_work_process()
956 u16 eqe_num = poll_data->eqe_num; in qm_work_process()
960 for (i = eqe_num - 1; i >= 0; i--) { in qm_work_process()
961 qp = &qm->qp_array[poll_data->qp_finish_id[i]]; in qm_work_process()
962 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) in qm_work_process()
965 if (qp->event_cb) { in qm_work_process()
966 qp->event_cb(qp); in qm_work_process()
970 if (likely(qp->req_cb)) in qm_work_process()
977 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_get_complete_eqe_num()
979 u16 eq_depth = qm->eq_depth; in qm_get_complete_eqe_num()
982 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { in qm_get_complete_eqe_num()
983 atomic64_inc(&qm->debug.dfx.err_irq_cnt); in qm_get_complete_eqe_num()
984 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
988 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; in qm_get_complete_eqe_num()
989 if (unlikely(cqn >= qm->qp_num)) in qm_get_complete_eqe_num()
991 poll_data = &qm->poll_data[cqn]; in qm_get_complete_eqe_num()
993 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { in qm_get_complete_eqe_num()
994 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; in qm_get_complete_eqe_num()
995 poll_data->qp_finish_id[eqe_num] = cqn; in qm_get_complete_eqe_num()
998 if (qm->status.eq_head == eq_depth - 1) { in qm_get_complete_eqe_num()
999 qm->status.eqc_phase = !qm->status.eqc_phase; in qm_get_complete_eqe_num()
1000 eqe = qm->eqe; in qm_get_complete_eqe_num()
1001 qm->status.eq_head = 0; in qm_get_complete_eqe_num()
1004 qm->status.eq_head++; in qm_get_complete_eqe_num()
1007 if (eqe_num == (eq_depth >> 1) - 1) in qm_get_complete_eqe_num()
1011 poll_data->eqe_num = eqe_num; in qm_get_complete_eqe_num()
1012 queue_work(qm->wq, &poll_data->work); in qm_get_complete_eqe_num()
1013 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
1020 /* Get qp id of completed tasks and re-enable the interrupt */ in qm_eq_irq()
1031 val = readl(qm->io_base + QM_IFC_INT_STATUS); in qm_mb_cmd_irq()
1036 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { in qm_mb_cmd_irq()
1037 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); in qm_mb_cmd_irq()
1041 schedule_work(&qm->cmd_process); in qm_mb_cmd_irq()
1050 if (qp->is_in_kernel) in qm_set_qp_disable()
1053 addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset; in qm_set_qp_disable()
1062 struct hisi_qp *qp = &qm->qp_array[qp_id]; in qm_disable_qp()
1071 struct device *dev = &qm->pdev->dev; in qm_reset_function()
1100 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; in qm_aeq_thread()
1101 u16 aeq_depth = qm->aeq_depth; in qm_aeq_thread()
1104 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); in qm_aeq_thread()
1106 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { in qm_aeq_thread()
1107 type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) & in qm_aeq_thread()
1109 qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; in qm_aeq_thread()
1113 dev_err(&qm->pdev->dev, "eq overflow, reset function\n"); in qm_aeq_thread()
1117 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n", in qm_aeq_thread()
1124 dev_err(&qm->pdev->dev, "unknown error type %u\n", in qm_aeq_thread()
1129 if (qm->status.aeq_head == aeq_depth - 1) { in qm_aeq_thread()
1130 qm->status.aeqc_phase = !qm->status.aeqc_phase; in qm_aeq_thread()
1131 aeqe = qm->aeqe; in qm_aeq_thread()
1132 qm->status.aeq_head = 0; in qm_aeq_thread()
1135 qm->status.aeq_head++; in qm_aeq_thread()
1139 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_thread()
1146 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_init_qp_status()
1148 qp_status->sq_tail = 0; in qm_init_qp_status()
1149 qp_status->cq_head = 0; in qm_init_qp_status()
1150 qp_status->cqc_phase = true; in qm_init_qp_status()
1151 atomic_set(&qp_status->used, 0); in qm_init_qp_status()
1156 struct device *dev = &qm->pdev->dev; in qm_init_prefetch()
1159 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in qm_init_prefetch()
1177 writel(page_type, qm->io_base + QM_PAGE_SIZE); in qm_init_prefetch()
1187 * IR(Mbps) = -------------------------
1227 factor->cbs_s = acc_shaper_calc_cbs_s(ir); in qm_get_shaper_para()
1234 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; in qm_get_shaper_para()
1236 factor->cir_b = cir_b; in qm_get_shaper_para()
1237 factor->cir_u = cir_u; in qm_get_shaper_para()
1238 factor->cir_s = cir_s; in qm_get_shaper_para()
1244 return -EINVAL; in qm_get_shaper_para()
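The qm_get_shaper_para() fragments suggest the driver searches for a (cir_b, cir_u, cir_s, cbs_s) combination and keeps it only when the rate it reproduces is close enough to the requested rate, returning -EINVAL otherwise. A hedged sketch of that acceptance check; QM_QOS_EXPAND_RATE scales the relative error to an integer, and the threshold parameter name is illustrative:

static bool shaper_para_acceptable(u32 ir_calc, u32 ir, u32 max_error_rate)
{
	u32 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;

	return error_rate <= max_error_rate;
}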
1255 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1264 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; in qm_vft_data_cfg()
1268 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1279 tmp = factor->cir_b | in qm_vft_data_cfg()
1280 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | in qm_vft_data_cfg()
1281 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | in qm_vft_data_cfg()
1283 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); in qm_vft_data_cfg()
1289 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); in qm_vft_data_cfg()
1290 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); in qm_vft_data_cfg()
1300 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in qm_set_vft_common()
1301 factor = &qm->factor[fun_num]; in qm_set_vft_common()
1303 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1309 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
1310 writel(type, qm->io_base + QM_VFT_CFG_TYPE); in qm_set_vft_common()
1314 writel(fun_num, qm->io_base + QM_VFT_CFG); in qm_set_vft_common()
1318 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
1319 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
1321 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1328 u32 qos = qm->factor[fun_num].func_qos; in qm_shaper_init_vft()
1331 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); in qm_shaper_init_vft()
1333 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); in qm_shaper_init_vft()
1336 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); in qm_shaper_init_vft()
1360 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in qm_set_sqc_cqc_vft()
1383 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft_v2()
1384 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_vft_v2()
1394 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v1()
1399 struct hisi_qm_err_info *err_info = &qm->err_info; in qm_hw_error_cfg()
1401 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; in qm_hw_error_cfg()
1403 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_cfg()
1406 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_cfg()
1407 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); in qm_hw_error_cfg()
1408 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_cfg()
1409 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); in qm_hw_error_cfg()
1418 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v2()
1419 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1420 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1425 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v2()
1427 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1428 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1438 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_init_v3()
1440 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v3()
1441 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1442 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1447 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v3()
1449 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1450 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1453 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_uninit_v3()
1459 struct device *dev = &qm->pdev->dev; in qm_log_hw_error()
1465 if (!(err->int_msk & error_status)) in qm_log_hw_error()
1469 err->msg, err->int_msk); in qm_log_hw_error()
1471 if (err->int_msk & QM_DB_TIMEOUT) { in qm_log_hw_error()
1472 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); in qm_log_hw_error()
1479 } else if (err->int_msk & QM_OF_FIFO_OF) { in qm_log_hw_error()
1480 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); in qm_log_hw_error()
1490 } else if (err->int_msk & QM_AXI_RRESP_ERR) { in qm_log_hw_error()
1491 reg_val = readl(qm->io_base + QM_ABNORMAL_INF02); in qm_log_hw_error()
1503 if (error_status & qm->error_mask) { in qm_hw_error_handle_v2()
1505 qm->err_status.is_qm_ecc_mbit = true; in qm_hw_error_handle_v2()
1508 if (error_status & qm->err_info.qm_reset_mask) { in qm_hw_error_handle_v2()
1510 writel(qm->err_info.nfe & (~error_status), in qm_hw_error_handle_v2()
1511 qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
1516 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_handle_v2()
1517 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
1518 writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_handle_v2()
1530 mutex_lock(&qm->mailbox_lock); in qm_get_mb_cmd()
1535 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_mb_cmd()
1536 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_mb_cmd()
1539 mutex_unlock(&qm->mailbox_lock); in qm_get_mb_cmd()
1547 if (qm->fun_type == QM_HW_PF) in qm_clear_cmd_interrupt()
1548 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); in qm_clear_cmd_interrupt()
1550 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1552 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1557 struct device *dev = &qm->pdev->dev; in qm_handle_vf_msg()
1561 ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id); in qm_handle_vf_msg()
1585 struct device *dev = &qm->pdev->dev; in qm_wait_vf_prepare_finish()
1586 u32 vfs_num = qm->vfs_num; in qm_wait_vf_prepare_finish()
1592 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_wait_vf_prepare_finish()
1596 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_wait_vf_prepare_finish()
1602 ret = -EBUSY; in qm_wait_vf_prepare_finish()
1627 val = readl(qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1630 writel(val, qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1632 val = readl(qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1634 writel(val, qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1641 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1643 writel(val, qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1648 struct device *dev = &qm->pdev->dev; in qm_ping_single_vf()
1653 ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num); in qm_ping_single_vf()
1662 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_single_vf()
1669 ret = -ETIMEDOUT; in qm_ping_single_vf()
1675 qm->ops->set_ifc_end(qm); in qm_ping_single_vf()
1681 struct device *dev = &qm->pdev->dev; in qm_ping_all_vfs()
1682 u32 vfs_num = qm->vfs_num; in qm_ping_all_vfs()
1688 ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS); in qm_ping_all_vfs()
1691 qm->ops->set_ifc_end(qm); in qm_ping_all_vfs()
1698 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_all_vfs()
1701 qm->ops->set_ifc_end(qm); in qm_ping_all_vfs()
1709 qm->ops->set_ifc_end(qm); in qm_ping_all_vfs()
1717 return -ETIMEDOUT; in qm_ping_all_vfs()
1726 ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0); in qm_ping_pf()
1728 dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd); in qm_ping_pf()
1736 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_ping_pf()
1741 ret = -ETIMEDOUT; in qm_ping_pf()
1747 qm->ops->set_ifc_end(qm); in qm_ping_pf()
1759 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); in qm_stop_qp()
1764 struct pci_dev *pdev = qm->pdev; in qm_set_msi()
1767 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
1770 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
1772 if (qm->err_status.is_qm_ecc_mbit || in qm_set_msi()
1773 qm->err_status.is_dev_ecc_mbit) in qm_set_msi()
1777 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) in qm_set_msi()
1778 return -EFAULT; in qm_set_msi()
1786 struct pci_dev *pdev = qm->pdev; in qm_wait_msi_finish()
1793 pci_read_config_dword(pdev, pdev->msi_cap + in qm_wait_msi_finish()
1806 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, in qm_wait_msi_finish()
1812 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, in qm_wait_msi_finish()
1821 struct pci_dev *pdev = qm->pdev; in qm_set_msi_v3()
1822 int ret = -ETIMEDOUT; in qm_set_msi_v3()
1825 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); in qm_set_msi_v3()
1831 pci_write_config_dword(pdev, pdev->msi_cap, cmd); in qm_set_msi_v3()
1834 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); in qm_set_msi_v3()
1857 mutex_lock(&qm->mailbox_lock); in qm_set_ifc_begin_v3()
1863 mutex_unlock(&qm->mailbox_lock); in qm_set_ifc_end_v3()
1888 if (qm->fun_type == QM_HW_PF) in qm_set_ifc_begin_v4()
1895 mutex_lock(&qm->ifc_lock); in qm_set_ifc_begin_v4()
1896 writeq(msg, qm->io_base + offset); in qm_set_ifc_begin_v4()
1903 mutex_unlock(&qm->ifc_lock); in qm_set_ifc_end_v4()
1912 return (u64)readl(qm->io_base + offset); in qm_get_ifc_pf()
1917 return readq(qm->io_base + QM_PF2VF_VF_R); in qm_get_ifc_vf()
1924 if (qm->fun_type == QM_HW_PF) in qm_get_ifc_v4()
1978 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_get_avail_sqe()
1979 u16 sq_tail = qp_status->sq_tail; in qm_get_avail_sqe()
1981 if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) in qm_get_avail_sqe()
1984 return qp->sqe + sq_tail * qp->qm->sqe_size; in qm_get_avail_sqe()
1992 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; in hisi_qm_unset_hw_reset()
1998 struct device *dev = &qm->pdev->dev; in qm_create_qp_nolock()
2002 if (atomic_read(&qm->status.flags) == QM_STOP) { in qm_create_qp_nolock()
2004 return ERR_PTR(-EPERM); in qm_create_qp_nolock()
2007 if (qm->qp_in_used == qm->qp_num) { in qm_create_qp_nolock()
2009 qm->qp_num); in qm_create_qp_nolock()
2010 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
2011 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
2014 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
2017 qm->qp_num); in qm_create_qp_nolock()
2018 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
2019 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
2022 qp = &qm->qp_array[qp_id]; in qm_create_qp_nolock()
2024 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); in qm_create_qp_nolock()
2026 qp->event_cb = NULL; in qm_create_qp_nolock()
2027 qp->req_cb = NULL; in qm_create_qp_nolock()
2028 qp->qp_id = qp_id; in qm_create_qp_nolock()
2029 qp->alg_type = alg_type; in qm_create_qp_nolock()
2030 qp->is_in_kernel = true; in qm_create_qp_nolock()
2031 qm->qp_in_used++; in qm_create_qp_nolock()
2037 * hisi_qm_create_qp() - Create a queue pair from qm.
2052 down_write(&qm->qps_lock); in hisi_qm_create_qp()
2054 up_write(&qm->qps_lock); in hisi_qm_create_qp()
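Per the qm_create_qp_nolock() fragments, allocation failures come back as ERR_PTR(-EPERM) when the qm is stopped or ERR_PTR(-EBUSY) when all queue pairs are in use, so callers must test the result with IS_ERR(). A minimal usage sketch, assuming the exported hisi_qm_create_qp(qm, alg_type) signature:

static int example_get_qp(struct hisi_qm *qm, u8 alg_type, struct hisi_qp **out)
{
	struct hisi_qp *qp = hisi_qm_create_qp(qm, alg_type);

	if (IS_ERR(qp))
		return PTR_ERR(qp);	/* -EPERM or -EBUSY per the fragments above */

	*out = qp;
	return 0;
}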
2063 * hisi_qm_release_qp() - Release a qp back to its qm.
2070 struct hisi_qm *qm = qp->qm; in hisi_qm_release_qp()
2072 down_write(&qm->qps_lock); in hisi_qm_release_qp()
2074 qm->qp_in_used--; in hisi_qm_release_qp()
2075 idr_remove(&qm->qp_idr, qp->qp_id); in hisi_qm_release_qp()
2077 up_write(&qm->qps_lock); in hisi_qm_release_qp()
2084 struct hisi_qm *qm = qp->qm; in qm_sq_ctx_cfg()
2085 enum qm_hw_ver ver = qm->ver; in qm_sq_ctx_cfg()
2089 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_sq_ctx_cfg()
2090 sqc.w8 = cpu_to_le16(qp->sq_depth - 1); in qm_sq_ctx_cfg()
2092 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); in qm_sq_ctx_cfg()
2095 sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); in qm_sq_ctx_cfg()
2096 sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma)); in qm_sq_ctx_cfg()
2097 sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma)); in qm_sq_ctx_cfg()
2101 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_sq_ctx_cfg()
2110 struct hisi_qm *qm = qp->qm; in qm_cq_ctx_cfg()
2111 enum qm_hw_ver ver = qm->ver; in qm_cq_ctx_cfg()
2116 cqc.w8 = cpu_to_le16(qp->cq_depth - 1); in qm_cq_ctx_cfg()
2118 cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); in qm_cq_ctx_cfg()
2127 cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma)); in qm_cq_ctx_cfg()
2128 cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma)); in qm_cq_ctx_cfg()
2131 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_cq_ctx_cfg()
2152 struct hisi_qm *qm = qp->qm; in qm_start_qp_nolock()
2153 struct device *dev = &qm->pdev->dev; in qm_start_qp_nolock()
2154 int qp_id = qp->qp_id; in qm_start_qp_nolock()
2158 if (atomic_read(&qm->status.flags) == QM_STOP) { in qm_start_qp_nolock()
2160 return -EPERM; in qm_start_qp_nolock()
2167 atomic_set(&qp->qp_status.flags, QP_START); in qm_start_qp_nolock()
2174 * hisi_qm_start_qp() - Start a qp into running.
2183 struct hisi_qm *qm = qp->qm; in hisi_qm_start_qp()
2186 down_write(&qm->qps_lock); in hisi_qm_start_qp()
2188 up_write(&qm->qps_lock); in hisi_qm_start_qp()
2195 * qp_stop_fail_cb() - call request cb.
2202 int qp_used = atomic_read(&qp->qp_status.used); in qp_stop_fail_cb()
2203 u16 cur_tail = qp->qp_status.sq_tail; in qp_stop_fail_cb()
2204 u16 sq_depth = qp->sq_depth; in qp_stop_fail_cb()
2205 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; in qp_stop_fail_cb()
2206 struct hisi_qm *qm = qp->qm; in qp_stop_fail_cb()
2212 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); in qp_stop_fail_cb()
2213 atomic_dec(&qp->qp_status.used); in qp_stop_fail_cb()
2219 struct device *dev = &qm->pdev->dev; in qm_wait_qp_empty()
2246 return -ETIMEDOUT; in qm_wait_qp_empty()
2256 * qm_drain_qp() - Drain a qp.
2265 struct hisi_qm *qm = qp->qm; in qm_drain_qp()
2274 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { in qm_drain_qp()
2277 dev_err(&qm->pdev->dev, "Failed to stop qp!\n"); in qm_drain_qp()
2284 ret = qm_wait_qp_empty(qm, &state, qp->qp_id); in qm_drain_qp()
2291 if (qm->debug.dev_dfx.dev_timeout) in qm_drain_qp()
2292 qm->debug.dev_dfx.dev_state = state; in qm_drain_qp()
2299 struct hisi_qm *qm = qp->qm; in qm_stop_qp_nolock()
2300 struct device *dev = &qm->pdev->dev; in qm_stop_qp_nolock()
2309 if (atomic_read(&qp->qp_status.flags) != QP_START) { in qm_stop_qp_nolock()
2310 qp->is_resetting = false; in qm_stop_qp_nolock()
2314 atomic_set(&qp->qp_status.flags, QP_STOP); in qm_stop_qp_nolock()
2317 if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) { in qm_stop_qp_nolock()
2320 dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n", qp->qp_id); in qm_stop_qp_nolock()
2323 flush_workqueue(qm->wq); in qm_stop_qp_nolock()
2324 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) in qm_stop_qp_nolock()
2327 dev_dbg(dev, "stop queue %u!", qp->qp_id); in qm_stop_qp_nolock()
2331 * hisi_qm_stop_qp() - Stop a qp in qm.
2338 down_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2340 up_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
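Stopping and releasing a qp pair up with the create/start calls earlier in this listing; a hedged teardown sketch, ignoring any return value of hisi_qm_stop_qp() since its type has varied across kernel versions:

static void example_put_qp(struct hisi_qp *qp)
{
	hisi_qm_stop_qp(qp);	/* quiesce the hardware queue */
	hisi_qm_release_qp(qp);	/* hand the qp back to the qm (drops qp_in_used) */
}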
2345 * hisi_qp_send() - Queue up a task in the hardware queue.
2349 * This function will return -EBUSY if qp is currently full, and -EAGAIN
2361 struct hisi_qp_status *qp_status = &qp->qp_status; in hisi_qp_send()
2362 u16 sq_tail = qp_status->sq_tail; in hisi_qp_send()
2363 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; in hisi_qp_send()
2366 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || in hisi_qp_send()
2367 atomic_read(&qp->qm->status.flags) == QM_STOP || in hisi_qp_send()
2368 qp->is_resetting)) { in hisi_qp_send()
2369 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); in hisi_qp_send()
2370 return -EAGAIN; in hisi_qp_send()
2374 return -EBUSY; in hisi_qp_send()
2376 memcpy(sqe, msg, qp->qm->sqe_size); in hisi_qp_send()
2378 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
2379 atomic_inc(&qp->qp_status.used); in hisi_qp_send()
2380 qp_status->sq_tail = sq_tail_next; in hisi_qp_send()
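Per the hisi_qp_send() fragments, -EBUSY means the submission queue is currently full and -EAGAIN means the qp or its qm is stopped or resetting; retry policy is left to the caller. A hedged usage sketch with an illustrative retry bound:

static int example_submit(struct hisi_qp *qp, const void *sqe)
{
	int retries = 10;
	int ret;

	do {
		ret = hisi_qp_send(qp, sqe);
		if (ret != -EBUSY)	/* -EBUSY: ring full, try again briefly */
			break;
		cpu_relax();
	} while (--retries);

	return ret;			/* -EAGAIN if the qp/qm is stopped or resetting */
}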
2390 if (qm->ver == QM_HW_V1) in hisi_qm_cache_wb()
2393 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2394 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in hisi_qm_cache_wb()
2397 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); in hisi_qm_cache_wb()
2402 wake_up_interruptible(&qp->uacce_q->wait); in qm_qp_event_notifier()
2408 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_available_instances()
2411 down_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2412 ret = qm->qp_num - qm->qp_in_used; in hisi_qm_get_available_instances()
2413 up_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2422 for (i = 0; i < qm->qp_num; i++) in hisi_qm_set_hw_reset()
2423 qm_set_qp_disable(&qm->qp_array[i], offset); in hisi_qm_set_hw_reset()
2430 struct hisi_qm *qm = uacce->priv; in hisi_qm_uacce_get_queue()
2438 q->priv = qp; in hisi_qm_uacce_get_queue()
2439 q->uacce = uacce; in hisi_qm_uacce_get_queue()
2440 qp->uacce_q = q; in hisi_qm_uacce_get_queue()
2441 qp->event_cb = qm_qp_event_notifier; in hisi_qm_uacce_get_queue()
2442 qp->pasid = arg; in hisi_qm_uacce_get_queue()
2443 qp->is_in_kernel = false; in hisi_qm_uacce_get_queue()
2450 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_put_queue()
2460 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_mmap()
2461 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_mmap()
2462 resource_size_t phys_base = qm->db_phys_base + in hisi_qm_uacce_mmap()
2463 qp->qp_id * qm->db_interval; in hisi_qm_uacce_mmap()
2464 size_t sz = vma->vm_end - vma->vm_start; in hisi_qm_uacce_mmap()
2465 struct pci_dev *pdev = qm->pdev; in hisi_qm_uacce_mmap()
2466 struct device *dev = &pdev->dev; in hisi_qm_uacce_mmap()
2470 switch (qfr->type) { in hisi_qm_uacce_mmap()
2472 if (qm->ver == QM_HW_V1) { in hisi_qm_uacce_mmap()
2474 return -EINVAL; in hisi_qm_uacce_mmap()
2475 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in hisi_qm_uacce_mmap()
2478 return -EINVAL; in hisi_qm_uacce_mmap()
2480 if (sz > qm->db_interval) in hisi_qm_uacce_mmap()
2481 return -EINVAL; in hisi_qm_uacce_mmap()
2486 return remap_pfn_range(vma, vma->vm_start, in hisi_qm_uacce_mmap()
2488 sz, pgprot_noncached(vma->vm_page_prot)); in hisi_qm_uacce_mmap()
2490 if (sz != qp->qdma.size) in hisi_qm_uacce_mmap()
2491 return -EINVAL; in hisi_qm_uacce_mmap()
2497 vm_pgoff = vma->vm_pgoff; in hisi_qm_uacce_mmap()
2498 vma->vm_pgoff = 0; in hisi_qm_uacce_mmap()
2499 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, in hisi_qm_uacce_mmap()
2500 qp->qdma.dma, sz); in hisi_qm_uacce_mmap()
2501 vma->vm_pgoff = vm_pgoff; in hisi_qm_uacce_mmap()
2505 return -EINVAL; in hisi_qm_uacce_mmap()
2511 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_start_queue()
2513 return hisi_qm_start_qp(qp, qp->pasid); in hisi_qm_uacce_start_queue()
2518 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_stop_queue()
2519 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_stop_queue()
2520 struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx; in hisi_qm_uacce_stop_queue()
2525 if (!dev_dfx->dev_timeout || !dev_dfx->dev_state) in hisi_qm_uacce_stop_queue()
2536 if (i >= dev_dfx->dev_timeout) { in hisi_qm_uacce_stop_queue()
2537 dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n", in hisi_qm_uacce_stop_queue()
2538 qp->qp_id, dev_dfx->dev_state); in hisi_qm_uacce_stop_queue()
2539 dev_dfx->dev_state = QM_FINISH_WAIT; in hisi_qm_uacce_stop_queue()
2547 struct hisi_qp *qp = q->priv; in hisi_qm_is_q_updated()
2548 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in hisi_qm_is_q_updated()
2551 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in hisi_qm_is_q_updated()
2555 cqe = qp->cqe + qp->qp_status.cq_head; in hisi_qm_is_q_updated()
2564 struct hisi_qm *qm = q->uacce->priv; in qm_set_sqctype()
2565 struct hisi_qp *qp = q->priv; in qm_set_sqctype()
2567 down_write(&qm->qps_lock); in qm_set_sqctype()
2568 qp->alg_type = type; in qm_set_sqctype()
2569 up_write(&qm->qps_lock); in qm_set_sqctype()
2575 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_ioctl()
2582 return -EFAULT; in hisi_qm_uacce_ioctl()
2585 return -EINVAL; in hisi_qm_uacce_ioctl()
2588 qp_ctx.id = qp->qp_id; in hisi_qm_uacce_ioctl()
2592 return -EFAULT; in hisi_qm_uacce_ioctl()
2598 return -EFAULT; in hisi_qm_uacce_ioctl()
2600 qp_info.sqe_size = qp->qm->sqe_size; in hisi_qm_uacce_ioctl()
2601 qp_info.sq_depth = qp->sq_depth; in hisi_qm_uacce_ioctl()
2602 qp_info.cq_depth = qp->cq_depth; in hisi_qm_uacce_ioctl()
2606 return -EFAULT; in hisi_qm_uacce_ioctl()
2611 return -EINVAL; in hisi_qm_uacce_ioctl()
2615 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
2625 isolate = &qm->isolate_data; in qm_hw_err_isolate()
2630 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) in qm_hw_err_isolate()
2635 return -ENOMEM; in qm_hw_err_isolate()
2638 * Time-stamp every slot AER error. Then check the AER error log when the in qm_hw_err_isolate()
2643 mutex_lock(&isolate->isolate_lock); in qm_hw_err_isolate()
2644 hw_err->timestamp = jiffies; in qm_hw_err_isolate()
2645 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { in qm_hw_err_isolate()
2646 if ((hw_err->timestamp - err->timestamp) / HZ > in qm_hw_err_isolate()
2648 list_del(&err->list); in qm_hw_err_isolate()
2654 list_add(&hw_err->list, &isolate->qm_hw_errs); in qm_hw_err_isolate()
2655 mutex_unlock(&isolate->isolate_lock); in qm_hw_err_isolate()
2657 if (count >= isolate->err_threshold) in qm_hw_err_isolate()
2658 isolate->is_isolate = true; in qm_hw_err_isolate()
2667 mutex_lock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2668 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { in qm_hw_err_destroy()
2669 list_del(&err->list); in qm_hw_err_destroy()
2672 mutex_unlock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2677 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_isolate_state()
2680 if (uacce->is_vf) in hisi_qm_get_isolate_state()
2681 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_get_isolate_state()
2685 return pf_qm->isolate_data.is_isolate ? in hisi_qm_get_isolate_state()
2691 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_write()
2694 if (uacce->is_vf) in hisi_qm_isolate_threshold_write()
2695 return -EPERM; in hisi_qm_isolate_threshold_write()
2697 if (qm->isolate_data.is_isolate) in hisi_qm_isolate_threshold_write()
2698 return -EPERM; in hisi_qm_isolate_threshold_write()
2700 qm->isolate_data.err_threshold = num; in hisi_qm_isolate_threshold_write()
2710 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_read()
2713 if (uacce->is_vf) { in hisi_qm_isolate_threshold_read()
2714 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_isolate_threshold_read()
2715 return pf_qm->isolate_data.err_threshold; in hisi_qm_isolate_threshold_read()
2718 return qm->isolate_data.err_threshold; in hisi_qm_isolate_threshold_read()
2737 struct uacce_device *uacce = qm->uacce; in qm_remove_uacce()
2739 if (qm->use_sva) { in qm_remove_uacce()
2742 qm->uacce = NULL; in qm_remove_uacce()
2748 struct pci_dev *pdev = qm->pdev; in qm_alloc_uacce()
2759 ret = strscpy(interface.name, dev_driver_string(&pdev->dev), in qm_alloc_uacce()
2762 return -ENAMETOOLONG; in qm_alloc_uacce()
2764 uacce = uacce_alloc(&pdev->dev, &interface); in qm_alloc_uacce()
2768 if (uacce->flags & UACCE_DEV_SVA) { in qm_alloc_uacce()
2769 qm->use_sva = true; in qm_alloc_uacce()
2773 return -EINVAL; in qm_alloc_uacce()
2776 uacce->is_vf = pdev->is_virtfn; in qm_alloc_uacce()
2777 uacce->priv = qm; in qm_alloc_uacce()
2779 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2780 uacce->api_ver = HISI_QM_API_VER_BASE; in qm_alloc_uacce()
2781 else if (qm->ver == QM_HW_V2) in qm_alloc_uacce()
2782 uacce->api_ver = HISI_QM_API_VER2_BASE; in qm_alloc_uacce()
2784 uacce->api_ver = HISI_QM_API_VER3_BASE; in qm_alloc_uacce()
2786 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2788 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_alloc_uacce()
2792 mmio_page_nr = qm->db_interval / PAGE_SIZE; in qm_alloc_uacce()
2797 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + in qm_alloc_uacce()
2801 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; in qm_alloc_uacce()
2802 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; in qm_alloc_uacce()
2804 qm->uacce = uacce; in qm_alloc_uacce()
2805 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); in qm_alloc_uacce()
2806 mutex_init(&qm->isolate_data.isolate_lock); in qm_alloc_uacce()
2812 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2820 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) in qm_frozen()
2823 down_write(&qm->qps_lock); in qm_frozen()
2825 if (!qm->qp_in_used) { in qm_frozen()
2826 qm->qp_in_used = qm->qp_num; in qm_frozen()
2827 up_write(&qm->qps_lock); in qm_frozen()
2828 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); in qm_frozen()
2832 up_write(&qm->qps_lock); in qm_frozen()
2834 return -EBUSY; in qm_frozen()
2845 return -EINVAL; in qm_try_frozen_vfs()
2848 mutex_lock(&qm_list->lock); in qm_try_frozen_vfs()
2849 list_for_each_entry(qm, &qm_list->list, list) { in qm_try_frozen_vfs()
2850 dev = qm->pdev; in qm_try_frozen_vfs()
2862 mutex_unlock(&qm_list->lock); in qm_try_frozen_vfs()
2868 * hisi_qm_wait_task_finish() - Wait until the task is finished
2876 ((qm->fun_type == QM_HW_PF) && in hisi_qm_wait_task_finish()
2877 qm_try_frozen_vfs(qm->pdev, qm_list))) { in hisi_qm_wait_task_finish()
2881 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || in hisi_qm_wait_task_finish()
2882 test_bit(QM_RESETTING, &qm->misc_ctl)) in hisi_qm_wait_task_finish()
2885 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_wait_task_finish()
2886 flush_work(&qm->cmd_process); in hisi_qm_wait_task_finish()
2894 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_uninit()
2898 for (i = num - 1; i >= 0; i--) { in hisi_qp_memory_uninit()
2899 qdma = &qm->qp_array[i].qdma; in hisi_qp_memory_uninit()
2900 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); in hisi_qp_memory_uninit()
2901 kfree(qm->poll_data[i].qp_finish_id); in hisi_qp_memory_uninit()
2904 kfree(qm->poll_data); in hisi_qp_memory_uninit()
2905 kfree(qm->qp_array); in hisi_qp_memory_uninit()
2911 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_init()
2912 size_t off = qm->sqe_size * sq_depth; in hisi_qp_memory_init()
2914 int ret = -ENOMEM; in hisi_qp_memory_init()
2916 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), in hisi_qp_memory_init()
2918 if (!qm->poll_data[id].qp_finish_id) in hisi_qp_memory_init()
2919 return -ENOMEM; in hisi_qp_memory_init()
2921 qp = &qm->qp_array[id]; in hisi_qp_memory_init()
2922 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, in hisi_qp_memory_init()
2924 if (!qp->qdma.va) in hisi_qp_memory_init()
2927 qp->sqe = qp->qdma.va; in hisi_qp_memory_init()
2928 qp->sqe_dma = qp->qdma.dma; in hisi_qp_memory_init()
2929 qp->cqe = qp->qdma.va + off; in hisi_qp_memory_init()
2930 qp->cqe_dma = qp->qdma.dma + off; in hisi_qp_memory_init()
2931 qp->qdma.size = dma_size; in hisi_qp_memory_init()
2932 qp->sq_depth = sq_depth; in hisi_qp_memory_init()
2933 qp->cq_depth = cq_depth; in hisi_qp_memory_init()
2934 qp->qm = qm; in hisi_qp_memory_init()
2935 qp->qp_id = id; in hisi_qp_memory_init()
2940 kfree(qm->poll_data[id].qp_finish_id); in hisi_qp_memory_init()
2946 struct pci_dev *pdev = qm->pdev; in hisi_qm_pre_init()
2948 if (qm->ver == QM_HW_V1) in hisi_qm_pre_init()
2949 qm->ops = &qm_hw_ops_v1; in hisi_qm_pre_init()
2950 else if (qm->ver == QM_HW_V2) in hisi_qm_pre_init()
2951 qm->ops = &qm_hw_ops_v2; in hisi_qm_pre_init()
2952 else if (qm->ver == QM_HW_V3) in hisi_qm_pre_init()
2953 qm->ops = &qm_hw_ops_v3; in hisi_qm_pre_init()
2955 qm->ops = &qm_hw_ops_v4; in hisi_qm_pre_init()
2958 mutex_init(&qm->mailbox_lock); in hisi_qm_pre_init()
2959 mutex_init(&qm->ifc_lock); in hisi_qm_pre_init()
2960 init_rwsem(&qm->qps_lock); in hisi_qm_pre_init()
2961 qm->qp_in_used = 0; in hisi_qm_pre_init()
2962 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { in hisi_qm_pre_init()
2963 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) in hisi_qm_pre_init()
2964 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); in hisi_qm_pre_init()
2972 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_uninit()
2975 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2977 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2984 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_init()
2991 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2993 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2998 struct pci_dev *pdev = qm->pdev; in qm_put_pci_res()
3000 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_put_pci_res()
3001 iounmap(qm->db_io_base); in qm_put_pci_res()
3003 iounmap(qm->io_base); in qm_put_pci_res()
3009 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_uninit()
3018 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) in hisi_qm_set_state()
3019 writel(state, qm->io_base + QM_VF_STATE); in hisi_qm_set_state()
3024 destroy_workqueue(qm->wq); in hisi_qm_unint_work()
3029 struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; in hisi_qm_free_rsv_buf()
3030 struct device *dev = &qm->pdev->dev; in hisi_qm_free_rsv_buf()
3032 dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma); in hisi_qm_free_rsv_buf()
3037 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_uninit()
3039 hisi_qp_memory_uninit(qm, qm->qp_num); in hisi_qm_memory_uninit()
3041 if (qm->qdma.va) { in hisi_qm_memory_uninit()
3043 dma_free_coherent(dev, qm->qdma.size, in hisi_qm_memory_uninit()
3044 qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_uninit()
3047 idr_destroy(&qm->qp_idr); in hisi_qm_memory_uninit()
3049 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_uninit()
3050 kfree(qm->factor); in hisi_qm_memory_uninit()
3054 * hisi_qm_uninit() - Uninitialize qm.
3064 down_write(&qm->qps_lock); in hisi_qm_uninit()
3067 up_write(&qm->qps_lock); in hisi_qm_uninit()
3076 * hisi_qm_get_vft() - Get vft from a qm.
3090 return -EINVAL; in hisi_qm_get_vft()
3092 if (!qm->ops->get_vft) { in hisi_qm_get_vft()
3093 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); in hisi_qm_get_vft()
3094 return -EINVAL; in hisi_qm_get_vft()
3097 return qm->ops->get_vft(qm, base, number); in hisi_qm_get_vft()
3101 * hisi_qm_set_vft() - Set vft to a qm.
3110 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3111 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3117 u32 max_q_num = qm->ctrl_qp_num; in hisi_qm_set_vft()
3121 return -EINVAL; in hisi_qm_set_vft()
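The hisi_qm_set_vft() kernel-doc above gives the calling convention directly: assign queues A~B by passing the base queue and the count. A concrete sketch following that convention and the __hisi_qm_start() call later in this listing; the queue numbers are illustrative:

static int example_assign_pf_queues(struct hisi_qm *qm)
{
	/* Queues 0..63 to function 0 (the PF): A = 0, B = 63, count = B - A + 1 */
	return hisi_qm_set_vft(qm, 0, 0, 64);
}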
3128 struct hisi_qm_status *status = &qm->status; in qm_init_eq_aeq_status()
3130 status->eq_head = 0; in qm_init_eq_aeq_status()
3131 status->aeq_head = 0; in qm_init_eq_aeq_status()
3132 status->eqc_phase = true; in qm_init_eq_aeq_status()
3133 status->aeqc_phase = true; in qm_init_eq_aeq_status()
3139 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_enable_eq_aeq_interrupts()
3140 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_enable_eq_aeq_interrupts()
3142 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3143 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3148 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3149 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3156 eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3157 eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3158 if (qm->ver == QM_HW_V1) in qm_eq_ctx_cfg()
3160 eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_eq_ctx_cfg()
3169 aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3170 aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3171 aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_aeq_ctx_cfg()
3178 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_ctx_cfg()
3196 WARN_ON(!qm->qdma.va); in __hisi_qm_start()
3198 if (qm->fun_type == QM_HW_PF) { in __hisi_qm_start()
3199 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
3208 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
3212 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
3223 * hisi_qm_start() - start qm
3230 struct device *dev = &qm->pdev->dev; in hisi_qm_start()
3233 down_write(&qm->qps_lock); in hisi_qm_start()
3235 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); in hisi_qm_start()
3237 if (!qm->qp_num) { in hisi_qm_start()
3239 ret = -EINVAL; in hisi_qm_start()
3247 atomic_set(&qm->status.flags, QM_WORK); in hisi_qm_start()
3251 up_write(&qm->qps_lock); in hisi_qm_start()
3258 struct device *dev = &qm->pdev->dev; in qm_restart()
3266 down_write(&qm->qps_lock); in qm_restart()
3267 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
3268 qp = &qm->qp_array[i]; in qm_restart()
3269 if (atomic_read(&qp->qp_status.flags) == QP_STOP && in qm_restart()
3270 qp->is_resetting == true) { in qm_restart()
3275 up_write(&qm->qps_lock); in qm_restart()
3278 qp->is_resetting = false; in qm_restart()
3281 up_write(&qm->qps_lock); in qm_restart()
3292 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
3293 qp = &qm->qp_array[i]; in qm_stop_started_qp()
3294 if (atomic_read(&qp->qp_status.flags) == QP_START) { in qm_stop_started_qp()
3295 qp->is_resetting = true; in qm_stop_started_qp()
3302 * qm_clear_queues() - Clear all queues memory in a qm.
3313 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
3314 qp = &qm->qp_array[i]; in qm_clear_queues()
3315 if (qp->is_in_kernel && qp->is_resetting) in qm_clear_queues()
3316 memset(qp->qdma.va, 0, qp->qdma.size); in qm_clear_queues()
3319 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
3323 * hisi_qm_stop() - Stop a qm.
3333 struct device *dev = &qm->pdev->dev; in hisi_qm_stop()
3336 down_write(&qm->qps_lock); in hisi_qm_stop()
3338 if (atomic_read(&qm->status.flags) == QM_STOP) in hisi_qm_stop()
3342 atomic_set(&qm->status.flags, QM_STOP); in hisi_qm_stop()
3343 qm->status.stop_reason = r; in hisi_qm_stop()
3345 if (qm->status.stop_reason != QM_NORMAL) { in hisi_qm_stop()
3352 if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) && in hisi_qm_stop()
3367 if (qm->fun_type == QM_HW_PF) { in hisi_qm_stop()
3371 ret = -EBUSY; in hisi_qm_stop()
3377 qm->status.stop_reason = QM_NORMAL; in hisi_qm_stop()
3380 up_write(&qm->qps_lock); in hisi_qm_stop()
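hisi_qm_stop() takes a stop reason; QM_NORMAL, seen elsewhere in this listing, is the ordinary shutdown path. A hedged teardown sketch pairing it with hisi_qm_uninit(); the ordering and logging are illustrative:

static void example_remove(struct hisi_qm *qm)
{
	int ret = hisi_qm_stop(qm, QM_NORMAL);

	if (ret)
		dev_warn(&qm->pdev->dev, "failed to stop qm (%d)\n", ret);

	hisi_qm_uninit(qm);
}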
3387 if (!qm->ops->hw_error_init) { in qm_hw_error_init()
3388 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); in qm_hw_error_init()
3392 qm->ops->hw_error_init(qm); in qm_hw_error_init()
3397 if (!qm->ops->hw_error_uninit) { in qm_hw_error_uninit()
3398 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); in qm_hw_error_uninit()
3402 qm->ops->hw_error_uninit(qm); in qm_hw_error_uninit()
3407 if (!qm->ops->hw_error_handle) { in qm_hw_error_handle()
3408 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); in qm_hw_error_handle()
3412 return qm->ops->hw_error_handle(qm); in qm_hw_error_handle()
3416 * hisi_qm_dev_err_init() - Initialize device error configuration.
3423 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_init()
3428 if (!qm->err_ini->hw_err_enable) { in hisi_qm_dev_err_init()
3429 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); in hisi_qm_dev_err_init()
3432 qm->err_ini->hw_err_enable(qm); in hisi_qm_dev_err_init()
3437 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3444 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_uninit()
3449 if (!qm->err_ini->hw_err_disable) { in hisi_qm_dev_err_uninit()
3450 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); in hisi_qm_dev_err_uninit()
3453 qm->err_ini->hw_err_disable(qm); in hisi_qm_dev_err_uninit()
3458 * hisi_qm_free_qps() - free multiple queue pairs.
3469 for (i = qp_num - 1; i >= 0; i--) in hisi_qm_free_qps()
3479 list_del(&res->list); in free_list()
3493 list_for_each_entry(qm, &qm_list->list, list) { in hisi_qm_sort_devices()
3494 dev = &qm->pdev->dev; in hisi_qm_sort_devices()
3502 return -ENOMEM; in hisi_qm_sort_devices()
3504 res->qm = qm; in hisi_qm_sort_devices()
3505 res->distance = node_distance(dev_node, node); in hisi_qm_sort_devices()
3508 if (res->distance < tmp->distance) { in hisi_qm_sort_devices()
3509 n = &tmp->list; in hisi_qm_sort_devices()
3513 list_add_tail(&res->list, n); in hisi_qm_sort_devices()
3520 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3535 int ret = -ENODEV; in hisi_qm_alloc_qps_node()
3540 return -EINVAL; in hisi_qm_alloc_qps_node()
3542 mutex_lock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3544 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3550 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); in hisi_qm_alloc_qps_node()
3563 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
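hisi_qm_alloc_qps_node() and hisi_qm_free_qps() pair up as the fragments above suggest: the allocator sorts devices by NUMA distance and fills the caller's qp array. A hedged usage sketch, assuming the signature from include/linux/hisi_acc_qm.h (qm_list, count, algorithm type, NUMA node, output array); the alg_type value is a placeholder:

static int example_alloc_two_qps(struct hisi_qm_list *qm_list, struct hisi_qp *qps[2])
{
	int ret = hisi_qm_alloc_qps_node(qm_list, 2, 0, numa_node_id(), qps);

	if (ret)
		return ret;	/* -EINVAL/-ENODEV per the fragments above */

	/* ... use the qps, then hand them back ... */
	hisi_qm_free_qps(qps, 2);
	return 0;
}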
3577 u32 max_qp_num = qm->max_qp_num; in qm_vf_q_assign()
3578 u32 q_base = qm->qp_num; in qm_vf_q_assign()
3582 return -EINVAL; in qm_vf_q_assign()
3584 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; in qm_vf_q_assign()
3588 return -EINVAL; in qm_vf_q_assign()
3593 for (i = num_vfs; i > 0; i--) { in qm_vf_q_assign()
3603 remain_q_num--; in qm_vf_q_assign()
3611 for (j = num_vfs; j > i; j--) in qm_vf_q_assign()
3626 for (i = 1; i <= qm->vfs_num; i++) { in qm_clear_vft_config()
3631 qm->vfs_num = 0; in qm_clear_vft_config()
3638 struct device *dev = &qm->pdev->dev; in qm_func_shaper_enable()
3642 total_vfs = pci_sriov_get_totalvfs(qm->pdev); in qm_func_shaper_enable()
3644 return -EINVAL; in qm_func_shaper_enable()
3646 qm->factor[fun_index].func_qos = qos; in qm_func_shaper_enable()
3648 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); in qm_func_shaper_enable()
3651 return -EINVAL; in qm_func_shaper_enable()
3659 return -EINVAL; in qm_func_shaper_enable()
3674 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3680 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); in qm_get_shaper_vft_qos()
3681 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); in qm_get_shaper_vft_qos()
3682 writel(fun_index, qm->io_base + QM_VFT_CFG); in qm_get_shaper_vft_qos()
3684 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_get_shaper_vft_qos()
3685 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_get_shaper_vft_qos()
3687 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3693 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | in qm_get_shaper_vft_qos()
3694 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); in qm_get_shaper_vft_qos()
3705 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; in qm_get_shaper_vft_qos()
3707 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; in qm_get_shaper_vft_qos()
3709 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); in qm_get_shaper_vft_qos()
3718 struct device *dev = &qm->pdev->dev; in qm_vf_get_qos()
3736 int ret = -EINVAL; in qm_vf_read_qos()
3739 qm->mb_qos = 0; in qm_vf_read_qos()
3744 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); in qm_vf_read_qos()
3750 if (qm->mb_qos) in qm_vf_read_qos()
3754 pci_err(qm->pdev, "PF ping VF timeout!\n"); in qm_vf_read_qos()
3755 return -ETIMEDOUT; in qm_vf_read_qos()
3765 struct hisi_qm *qm = filp->private_data; in qm_algqos_read()
3775 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_read()
3776 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); in qm_algqos_read()
3777 ret = -EAGAIN; in qm_algqos_read()
3781 if (qm->fun_type == QM_HW_PF) { in qm_algqos_read()
3787 ir = qm->mb_qos; in qm_algqos_read()
3796 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_read()
3806 const struct bus_type *bus_type = qm->pdev->dev.bus; in qm_get_qos_value()
3815 return -EINVAL; in qm_get_qos_value()
3819 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); in qm_get_qos_value()
3820 return -EINVAL; in qm_get_qos_value()
3825 pci_err(qm->pdev, "input pci bdf number is error!\n"); in qm_get_qos_value()
3826 return -ENODEV; in qm_get_qos_value()
3831 *fun_index = pdev->devfn; in qm_get_qos_value()
3839 struct hisi_qm *qm = filp->private_data; in qm_algqos_write()
3849 return -ENOSPC; in qm_algqos_write()
3851 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); in qm_algqos_write()
3861 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_write()
3862 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); in qm_algqos_write()
3863 return -EAGAIN; in qm_algqos_write()
3868 ret = -EINVAL; in qm_algqos_write()
3874 pci_err(qm->pdev, "failed to enable function shaper!\n"); in qm_algqos_write()
3875 ret = -EINVAL; in qm_algqos_write()
3879 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", in qm_algqos_write()
3886 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_write()
3898 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
3905 if (qm->fun_type == QM_HW_PF) in hisi_qm_set_algqos_init()
3906 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3908 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_set_algqos_init()
3909 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3918 qm->factor[i].func_qos = QM_QOS_MAX_VAL; in hisi_qm_init_vf_qos()
3922 * hisi_qm_sriov_enable() - enable virtual functions
3942 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", in hisi_qm_sriov_enable()
3949 ret = -ERANGE; in hisi_qm_sriov_enable()
3955 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_sriov_enable()
3970 qm->vfs_num = num_vfs; in hisi_qm_sriov_enable()
3983 * hisi_qm_sriov_disable - disable virtual functions
3995 return -EPERM; in hisi_qm_sriov_disable()
3999 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { in hisi_qm_sriov_disable()
4001 return -EBUSY; in hisi_qm_sriov_disable()
4006 qm->vfs_num = 0; in hisi_qm_sriov_disable()
4014 * hisi_qm_sriov_configure - configure the number of VFs
4018 * Enable SR-IOV according to num_vfs, 0 means disable.
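hisi_qm_sriov_configure() follows the standard PCI .sriov_configure contract described above (num_vfs == 0 disables). A hedged sketch of how an accelerator driver typically wires it up; the driver name and the omitted fields are placeholders:

static struct pci_driver example_acc_driver = {
	.name            = "example_acc",
	.sriov_configure = hisi_qm_sriov_configure,
	/* .id_table, .probe, .remove omitted from this sketch */
};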
4031 if (!qm->err_ini->get_err_result) { in qm_dev_err_handle()
4032 dev_err(&qm->pdev->dev, "Device doesn't support reset!\n"); in qm_dev_err_handle()
4036 return qm->err_ini->get_err_result(qm); in qm_dev_err_handle()
4055 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4068 if (pdev->is_virtfn) in hisi_qm_dev_err_detected()
4085 struct pci_dev *pdev = qm->pdev; in qm_check_req_recv()
4089 if (qm->ver >= QM_HW_V3) in qm_check_req_recv()
4092 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4093 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4097 dev_err(&pdev->dev, "Fails to read QM reg!\n"); in qm_check_req_recv()
4101 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4102 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4106 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); in qm_check_req_recv()
4113 struct pci_dev *pdev = qm->pdev; in qm_set_pf_mse()
4132 return -ETIMEDOUT; in qm_set_pf_mse()
4137 struct pci_dev *pdev = qm->pdev; in qm_set_vf_mse()
4164 return -ETIMEDOUT; in qm_set_vf_mse()
4172 if (qm->ver >= QM_HW_V3) in qm_dev_ecc_mbit_handle()
4175 if (!qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4176 qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4177 qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4178 qm->err_ini->close_axi_master_ooo(qm); in qm_dev_ecc_mbit_handle()
4179 } else if (qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4180 !qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4181 !qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4182 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4184 qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4185 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); in qm_dev_ecc_mbit_handle()
4192 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_prepare()
4193 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_prepare()
4198 mutex_lock(&qm_list->lock); in qm_vf_reset_prepare()
4199 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_prepare()
4200 virtfn = vf_qm->pdev; in qm_vf_reset_prepare()
4215 mutex_unlock(&qm_list->lock); in qm_vf_reset_prepare()
4222 struct pci_dev *pdev = qm->pdev; in qm_try_stop_vfs()
4225 if (!qm->vfs_num) in qm_try_stop_vfs()
4229 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_stop_vfs()
4244 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_prepare()
4247 if (qm->err_ini->set_priv_status) { in qm_controller_reset_prepare()
4248 ret = qm->err_ini->set_priv_status(qm); in qm_controller_reset_prepare()
4276 if (qm->use_sva) { in qm_controller_reset_prepare()
4286 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset_prepare()
4297 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_master_ooo_check()
4298 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_master_ooo_check()
4302 pci_warn(qm->pdev, "Bus lock! Please reset system.\n"); in qm_master_ooo_check()
4309 struct pci_dev *pdev = qm->pdev; in qm_soft_reset_prepare()
4317 if (qm->vfs_num) { in qm_soft_reset_prepare()
4325 ret = qm->ops->set_msi(qm, false); in qm_soft_reset_prepare()
4335 if (qm->err_ini->close_sva_prefetch) in qm_soft_reset_prepare()
4336 qm->err_ini->close_sva_prefetch(qm); in qm_soft_reset_prepare()
4347 struct pci_dev *pdev = qm->pdev; in qm_reset_device()
4349 /* The reset-related sub-control registers are not in the PCI BARs */ in qm_reset_device()
4350 if (ACPI_HANDLE(&pdev->dev)) { in qm_reset_device()
4354 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), in qm_reset_device()
4355 qm->err_info.acpi_rst, in qm_reset_device()
4359 return -EIO; in qm_reset_device()
4364 return -EIO; in qm_reset_device()
4371 return -EINVAL; in qm_reset_device()
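qm_reset_device() triggers the controller reset through an ACPI method named by qm->err_info.acpi_rst. A minimal sketch of that ACPI call pattern, using only the standard acpi_has_method()/acpi_evaluate_integer() interfaces; the method-name handling and return-value convention are simplified assumptions.

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int example_acpi_reset(struct pci_dev *pdev, char *method)
{
	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
	unsigned long long value = 0;
	acpi_status s;

	if (!handle || !acpi_has_method(handle, method))
		return -EOPNOTSUPP;

	/* the method returns an integer status; non-zero means the reset failed */
	s = acpi_evaluate_integer(handle, method, NULL, &value);
	if (ACPI_FAILURE(s))
		return -EIO;

	return value ? -EIO : 0;
}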
4387 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_done()
4388 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_done()
4393 mutex_lock(&qm_list->lock); in qm_vf_reset_done()
4394 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_done()
4395 virtfn = vf_qm->pdev; in qm_vf_reset_done()
4410 mutex_unlock(&qm_list->lock); in qm_vf_reset_done()
4416 struct pci_dev *pdev = qm->pdev; in qm_try_start_vfs()
4419 if (!qm->vfs_num) in qm_try_start_vfs()
4422 ret = qm_vf_q_assign(qm, qm->vfs_num); in qm_try_start_vfs()
4429 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_start_vfs()
4444 return qm->err_ini->hw_init(qm); in qm_dev_hw_init()
4451 if (qm->err_ini->open_sva_prefetch) in qm_restart_prepare()
4452 qm->err_ini->open_sva_prefetch(qm); in qm_restart_prepare()
4454 if (qm->ver >= QM_HW_V3) in qm_restart_prepare()
4457 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_prepare()
4458 !qm->err_status.is_dev_ecc_mbit) in qm_restart_prepare()
4462 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4463 writel(value & ~qm->err_info.msi_wr_port, in qm_restart_prepare()
4464 qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4467 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; in qm_restart_prepare()
4468 if (value && qm->err_ini->clear_dev_hw_err_status) in qm_restart_prepare()
4469 qm->err_ini->clear_dev_hw_err_status(qm, value); in qm_restart_prepare()
4472 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_restart_prepare()
4475 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); in qm_restart_prepare()
4482 if (qm->ver >= QM_HW_V3) in qm_restart_done()
4485 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_done()
4486 !qm->err_status.is_dev_ecc_mbit) in qm_restart_done()
4490 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4491 value |= qm->err_info.msi_wr_port; in qm_restart_done()
4492 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4495 qm->err_status.is_qm_ecc_mbit = false; in qm_restart_done()
4496 qm->err_status.is_dev_ecc_mbit = false; in qm_restart_done()
4501 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_done()
4504 ret = qm->ops->set_msi(qm, true); in qm_controller_reset_done()
4516 if (qm->vfs_num) { in qm_controller_reset_done()
4532 if (qm->err_ini->open_axi_master_ooo) in qm_controller_reset_done()
4533 qm->err_ini->open_axi_master_ooo(qm); in qm_controller_reset_done()
4565 struct pci_dev *pdev = qm->pdev; in qm_controller_reset()
4574 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset()
4579 if (qm->err_ini->show_last_dfx_regs) in qm_controller_reset()
4580 qm->err_ini->show_last_dfx_regs(qm); in qm_controller_reset()
4599 if (qm->use_sva) in qm_controller_reset()
4600 qm->isolate_data.is_isolate = true; in qm_controller_reset()
4605 * hisi_qm_dev_slot_reset() - slot reset
4616 if (pdev->is_virtfn) in hisi_qm_dev_slot_reset()
4656 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_prepare()
4685 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); in qm_flr_reset_complete()
4700 if (qm->fun_type == QM_HW_PF) { in hisi_qm_reset_done()
4725 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_done()
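hisi_qm_dev_err_detected(), hisi_qm_dev_slot_reset(), hisi_qm_reset_prepare() and hisi_qm_reset_done() are exported so the accelerator drivers can plug them into their PCI error handling. A minimal sketch of that wiring, assuming the prototypes match the struct pci_error_handlers callbacks as they do for the in-tree users:

#include <linux/pci.h>
#include <linux/hisi_acc_qm.h>	/* assumed header location */

static const struct pci_error_handlers example_acc_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,	/* AER: log and report status */
	.slot_reset	= hisi_qm_dev_slot_reset,	/* recover after a slot reset */
	.reset_prepare	= hisi_qm_reset_prepare,	/* quiesce before FLR */
	.reset_done	= hisi_qm_reset_done,		/* restart after FLR */
};

/* referenced from the pci_driver: .err_handler = &example_acc_err_handler */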
4740 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); in qm_abnormal_irq()
4743 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && in qm_abnormal_irq()
4744 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) in qm_abnormal_irq()
4745 schedule_work(&qm->rst_work); in qm_abnormal_irq()
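qm_abnormal_irq() only counts the event and schedules rst_work; the reset itself runs later in process context. A minimal sketch of that top-half/bottom-half split with hypothetical names; the work item is initialized with INIT_WORK() during setup.

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct rst_work;
	atomic64_t abnormal_irq_cnt;
};

static void example_reset_work(struct work_struct *work)
{
	/* process context: may sleep, take mutexes, talk to firmware, ... */
}

static void example_dev_init(struct example_dev *edev)
{
	INIT_WORK(&edev->rst_work, example_reset_work);
	atomic64_set(&edev->abnormal_irq_cnt, 0);
}

static irqreturn_t example_abnormal_irq(int irq, void *data)
{
	struct example_dev *edev = data;

	atomic64_inc(&edev->abnormal_irq_cnt);
	schedule_work(&edev->rst_work);		/* no-op if already queued */

	return IRQ_HANDLED;
}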
4751 * hisi_qm_dev_shutdown() - Shutdown device.
4763 dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n"); in hisi_qm_dev_shutdown()
4776 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in hisi_qm_controller_reset()
4783 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); in hisi_qm_controller_reset()
4792 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_prepare()
4797 dev_err(&pdev->dev, "reset prepare not ready!\n"); in qm_pf_reset_vf_prepare()
4798 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4805 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); in qm_pf_reset_vf_prepare()
4806 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4820 dev_warn(&pdev->dev, "PF response timed out in reset prepare!\n"); in qm_pf_reset_vf_prepare()
4826 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_done()
4832 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); in qm_pf_reset_vf_done()
4839 dev_warn(&pdev->dev, "PF response timed out in reset done!\n"); in qm_pf_reset_vf_done()
4846 struct device *dev = &qm->pdev->dev; in qm_wait_pf_reset_finish()
4851 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, in qm_wait_pf_reset_finish()
4857 return -ETIMEDOUT; in qm_wait_pf_reset_finish()
4864 ret = qm->ops->get_ifc(qm, &cmd, NULL, 0); in qm_wait_pf_reset_finish()
4873 ret = -EINVAL; in qm_wait_pf_reset_finish()
4882 struct device *dev = &qm->pdev->dev; in qm_pf_reset_vf_process()
4908 struct device *dev = &qm->pdev->dev; in qm_handle_cmd_msg()
4917 ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num); in qm_handle_cmd_msg()
4935 qm->mb_qos = data; in qm_handle_cmd_msg()
4947 u32 vfs_num = qm->vfs_num; in qm_cmd_process()
4951 if (qm->fun_type == QM_HW_PF) { in qm_cmd_process()
4952 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_cmd_process()
4968 * hisi_qm_alg_register() - Register alg to crypto.
4977 struct device *dev = &qm->pdev->dev; in hisi_qm_alg_register()
4979 if (qm->ver <= QM_HW_V2 && qm->use_sva) { in hisi_qm_alg_register()
4984 if (qm->qp_num < guard) { in hisi_qm_alg_register()
4989 return qm_list->register_to_crypto(qm); in hisi_qm_alg_register()
4994 * hisi_qm_alg_unregister() - Unregister alg from crypto.
5003 if (qm->ver <= QM_HW_V2 && qm->use_sva) in hisi_qm_alg_unregister()
5006 if (qm->qp_num < guard) in hisi_qm_alg_unregister()
5009 qm_list->unregister_from_crypto(qm); in hisi_qm_alg_unregister()
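A minimal sketch of the caller's side of the two helpers above, assuming the three-argument (qm, qm_list, guard) prototype implied by the qp_num guard check. The callbacks, guard value and list initialization are illustrative; the real drivers register skcipher/aead/etc. implementations inside register_to_crypto().

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/hisi_acc_qm.h>	/* assumed header location */

static int example_register_to_crypto(struct hisi_qm *qm)
{
	return 0;	/* crypto_register_*() calls would go here */
}

static void example_unregister_from_crypto(struct hisi_qm *qm)
{
	/* matching crypto_unregister_*() calls */
}

static struct hisi_qm_list example_devices = {
	.register_to_crypto	= example_register_to_crypto,
	.unregister_from_crypto	= example_unregister_from_crypto,
};

static void example_devices_init(void)	/* call from module_init() */
{
	INIT_LIST_HEAD(&example_devices.list);
	mutex_init(&example_devices.lock);
}

/* after a device has been probed and started: */
static int example_register_algs(struct hisi_qm *qm)
{
	int guard = 2;	/* hypothetical minimum queue count per crypto ctx */

	return hisi_qm_alg_register(qm, &example_devices, guard);
}

static void example_unregister_algs(struct hisi_qm *qm)
{
	hisi_qm_alg_unregister(qm, &example_devices, 2);
}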
5015 struct pci_dev *pdev = qm->pdev; in qm_unregister_abnormal_irq()
5018 if (qm->fun_type == QM_HW_VF) in qm_unregister_abnormal_irq()
5021 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; in qm_unregister_abnormal_irq()
5031 struct pci_dev *pdev = qm->pdev; in qm_register_abnormal_irq()
5035 if (qm->fun_type == QM_HW_VF) in qm_register_abnormal_irq()
5038 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; in qm_register_abnormal_irq()
5043 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); in qm_register_abnormal_irq()
5045 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); in qm_register_abnormal_irq()
5052 struct pci_dev *pdev = qm->pdev; in qm_unregister_mb_cmd_irq()
5055 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; in qm_unregister_mb_cmd_irq()
5065 struct pci_dev *pdev = qm->pdev; in qm_register_mb_cmd_irq()
5069 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; in qm_register_mb_cmd_irq()
5074 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); in qm_register_mb_cmd_irq()
5076 dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); in qm_register_mb_cmd_irq()
5083 struct pci_dev *pdev = qm->pdev; in qm_unregister_aeq_irq()
5086 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; in qm_unregister_aeq_irq()
5096 struct pci_dev *pdev = qm->pdev; in qm_register_aeq_irq()
5100 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; in qm_register_aeq_irq()
5106 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); in qm_register_aeq_irq()
5108 dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret); in qm_register_aeq_irq()
5115 struct pci_dev *pdev = qm->pdev; in qm_unregister_eq_irq()
5118 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; in qm_unregister_eq_irq()
5128 struct pci_dev *pdev = qm->pdev; in qm_register_eq_irq()
5132 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; in qm_register_eq_irq()
5137 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); in qm_register_eq_irq()
5139 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); in qm_register_eq_irq()
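The register/unregister helpers above all follow the same MSI plumbing: translate a vector index into a Linux IRQ number with pci_irq_vector(), then request either a plain handler or, for the AEQ path, a threaded one (hard handler plus sleeping thread). A minimal sketch of that pattern; vector indices, names and handler bodies are hypothetical.

#include <linux/interrupt.h>
#include <linux/pci.h>

#define EXAMPLE_EQ_VECTOR	0	/* hypothetical MSI vector index */
#define EXAMPLE_AEQ_VECTOR	1

static irqreturn_t example_eq_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static irqreturn_t example_aeq_hardirq(int irq, void *data)
{
	return IRQ_WAKE_THREAD;		/* defer to the thread below */
}

static irqreturn_t example_aeq_thread(int irq, void *data)
{
	return IRQ_HANDLED;		/* may sleep here */
}

static int example_request_irqs(struct pci_dev *pdev, void *drvdata)
{
	int ret;

	ret = request_irq(pci_irq_vector(pdev, EXAMPLE_EQ_VECTOR),
			  example_eq_irq, 0, "example-eq", drvdata);
	if (ret)
		return ret;

	ret = request_threaded_irq(pci_irq_vector(pdev, EXAMPLE_AEQ_VECTOR),
				   example_aeq_hardirq, example_aeq_thread,
				   IRQF_ONESHOT, "example-aeq", drvdata);
	if (ret)
		free_irq(pci_irq_vector(pdev, EXAMPLE_EQ_VECTOR), drvdata);

	return ret;
}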
5185 struct device *dev = &qm->pdev->dev; in qm_get_qp_num()
5189 if (qm->fun_type == QM_HW_VF) { in qm_get_qp_num()
5190 if (qm->ver != QM_HW_V1) in qm_get_qp_num()
5192 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); in qm_get_qp_num()
5197 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_qp_num()
5198 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); in qm_get_qp_num()
5199 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, in qm_get_qp_num()
5202 if (qm->qp_num <= qm->max_qp_num) in qm_get_qp_num()
5205 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { in qm_get_qp_num()
5208 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5209 return -EINVAL; in qm_get_qp_num()
5213 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5214 qm->qp_num = qm->max_qp_num; in qm_get_qp_num()
5215 qm->debug.curr_qm_qp_num = qm->qp_num; in qm_get_qp_num()
5223 struct pci_dev *pdev = qm->pdev; in qm_pre_store_caps()
5227 qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); in qm_pre_store_caps()
5229 return -ENOMEM; in qm_pre_store_caps()
5235 i, qm->cap_ver); in qm_pre_store_caps()
5238 qm->cap_tables.qm_cap_table = qm_cap; in qm_pre_store_caps()
5239 qm->cap_tables.qm_cap_size = size; in qm_pre_store_caps()
5246 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? in qm_get_hw_caps()
5248 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : in qm_get_hw_caps()
5255 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_hw_caps()
5257 if (qm->ver >= QM_HW_V3) { in qm_get_hw_caps()
5258 val = readl(qm->io_base + QM_FUNC_CAPS_REG); in qm_get_hw_caps()
5259 qm->cap_ver = val & QM_CAPBILITY_VERSION; in qm_get_hw_caps()
5264 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); in qm_get_hw_caps()
5266 set_bit(qm_cap_info_comm[i].type, &qm->caps); in qm_get_hw_caps()
5271 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); in qm_get_hw_caps()
5273 set_bit(cap_info[i].type, &qm->caps); in qm_get_hw_caps()
5282 struct pci_dev *pdev = qm->pdev; in qm_get_version()
5285 qm->ver = pdev->revision; in qm_get_version()
5287 if (pdev->revision == QM_HW_V3) { in qm_get_version()
5288 sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID); in qm_get_version()
5290 qm->ver = sub_version_id; in qm_get_version()
5296 struct pci_dev *pdev = qm->pdev; in qm_get_pci_res()
5297 struct device *dev = &pdev->dev; in qm_get_pci_res()
5300 ret = pci_request_mem_regions(pdev, qm->dev_name); in qm_get_pci_res()
5306 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); in qm_get_pci_res()
5307 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); in qm_get_pci_res()
5308 if (!qm->io_base) { in qm_get_pci_res()
5309 ret = -EIO; in qm_get_pci_res()
5319 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in qm_get_pci_res()
5320 qm->db_interval = QM_QP_DB_INTERVAL; in qm_get_pci_res()
5321 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); in qm_get_pci_res()
5322 qm->db_io_base = ioremap(qm->db_phys_base, in qm_get_pci_res()
5324 if (!qm->db_io_base) { in qm_get_pci_res()
5325 ret = -EIO; in qm_get_pci_res()
5329 qm->db_phys_base = qm->phys_base; in qm_get_pci_res()
5330 qm->db_io_base = qm->io_base; in qm_get_pci_res()
5331 qm->db_interval = 0; in qm_get_pci_res()
5342 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_get_pci_res()
5343 iounmap(qm->db_io_base); in qm_get_pci_res()
5345 iounmap(qm->io_base); in qm_get_pci_res()
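qm_get_pci_res() claims the device's memory regions, maps BAR 2 for the QM registers and, with doorbell isolation, BAR 4 for the doorbells, unwinding in reverse order on failure. A minimal sketch of that claim/map/unwind sequence for a single BAR; the BAR index and region name are assumptions.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *example_map_bar2(struct pci_dev *pdev)
{
	void __iomem *base;
	int ret;

	ret = pci_request_mem_regions(pdev, "example_acc");
	if (ret)
		return ERR_PTR(ret);

	base = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
	if (!base) {
		pci_release_mem_regions(pdev);
		return ERR_PTR(-EIO);
	}

	return base;
}

static void example_unmap_bar2(struct pci_dev *pdev, void __iomem *base)
{
	iounmap(base);
	pci_release_mem_regions(pdev);
}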
5353 acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); in qm_clear_device()
5356 if (qm->fun_type == QM_HW_VF) in qm_clear_device()
5360 if (!qm->err_ini->err_info_init) in qm_clear_device()
5362 qm->err_ini->err_info_init(qm); in qm_clear_device()
5368 if (!acpi_has_method(handle, qm->err_info.acpi_rst)) in qm_clear_device()
5373 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_clear_device()
5377 if (qm->err_ini->set_priv_status) { in qm_clear_device()
5378 ret = qm->err_ini->set_priv_status(qm); in qm_clear_device()
5380 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_clear_device()
5390 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_init()
5391 struct device *dev = &pdev->dev; in hisi_qm_pci_init()
5436 for (i = 0; i < qm->qp_num; i++) in hisi_qm_init_work()
5437 INIT_WORK(&qm->poll_data[i].work, qm_work_process); in hisi_qm_init_work()
5439 if (qm->fun_type == QM_HW_PF) in hisi_qm_init_work()
5440 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); in hisi_qm_init_work()
5442 if (qm->ver > QM_HW_V2) in hisi_qm_init_work()
5443 INIT_WORK(&qm->cmd_process, qm_cmd_process); in hisi_qm_init_work()
5445 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | in hisi_qm_init_work()
5447 pci_name(qm->pdev)); in hisi_qm_init_work()
5448 if (!qm->wq) { in hisi_qm_init_work()
5449 pci_err(qm->pdev, "failed to alloc workqueue!\n"); in hisi_qm_init_work()
5450 return -ENOMEM; in hisi_qm_init_work()
5458 struct device *dev = &qm->pdev->dev; in hisi_qp_alloc_memory()
5463 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); in hisi_qp_alloc_memory()
5464 if (!qm->qp_array) in hisi_qp_alloc_memory()
5465 return -ENOMEM; in hisi_qp_alloc_memory()
5467 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); in hisi_qp_alloc_memory()
5468 if (!qm->poll_data) { in hisi_qp_alloc_memory()
5469 kfree(qm->qp_array); in hisi_qp_alloc_memory()
5470 return -ENOMEM; in hisi_qp_alloc_memory()
5476 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; in hisi_qp_alloc_memory()
5478 for (i = 0; i < qm->qp_num; i++) { in hisi_qp_alloc_memory()
5479 qm->poll_data[i].qm = qm; in hisi_qp_alloc_memory()
5496 struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; in hisi_qm_alloc_rsv_buf()
5497 struct qm_dma *xqc_dma = &xqc_buf->qcdma; in hisi_qm_alloc_rsv_buf()
5498 struct device *dev = &qm->pdev->dev; in hisi_qm_alloc_rsv_buf()
5502 (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \ in hisi_qm_alloc_rsv_buf()
5503 (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \ in hisi_qm_alloc_rsv_buf()
5507 xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) + in hisi_qm_alloc_rsv_buf()
5511 xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size, in hisi_qm_alloc_rsv_buf()
5512 &xqc_dma->dma, GFP_KERNEL); in hisi_qm_alloc_rsv_buf()
5513 if (!xqc_dma->va) in hisi_qm_alloc_rsv_buf()
5514 return -ENOMEM; in hisi_qm_alloc_rsv_buf()
5526 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_init()
5530 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in hisi_qm_memory_init()
5531 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; in hisi_qm_memory_init()
5532 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); in hisi_qm_memory_init()
5533 if (!qm->factor) in hisi_qm_memory_init()
5534 return -ENOMEM; in hisi_qm_memory_init()
5537 qm->factor[0].func_qos = QM_QOS_MAX_VAL; in hisi_qm_memory_init()
5541 (qm)->type = ((qm)->qdma.va + (off)); \ in hisi_qm_memory_init()
5542 (qm)->type##_dma = (qm)->qdma.dma + (off); \ in hisi_qm_memory_init()
5546 idr_init(&qm->qp_idr); in hisi_qm_memory_init()
5547 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); in hisi_qm_memory_init()
5548 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + in hisi_qm_memory_init()
5549 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + in hisi_qm_memory_init()
5550 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + in hisi_qm_memory_init()
5551 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); in hisi_qm_memory_init()
5552 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, in hisi_qm_memory_init()
5554 dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size); in hisi_qm_memory_init()
5555 if (!qm->qdma.va) { in hisi_qm_memory_init()
5556 ret = -ENOMEM; in hisi_qm_memory_init()
5560 QM_INIT_BUF(qm, eqe, qm->eq_depth); in hisi_qm_memory_init()
5561 QM_INIT_BUF(qm, aeqe, qm->aeq_depth); in hisi_qm_memory_init()
5562 QM_INIT_BUF(qm, sqc, qm->qp_num); in hisi_qm_memory_init()
5563 QM_INIT_BUF(qm, cqc, qm->qp_num); in hisi_qm_memory_init()
5578 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_init()
5580 idr_destroy(&qm->qp_idr); in hisi_qm_memory_init()
5581 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_init()
5582 kfree(qm->factor); in hisi_qm_memory_init()
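hisi_qm_memory_init() and hisi_qm_alloc_rsv_buf() both back several ring/context buffers with a single dma_alloc_coherent() region and carve it up at aligned offsets, so each sub-buffer gets matching CPU and DMA addresses. A minimal sketch of the same carving scheme; the entry sizes, counts and 32-byte alignment are illustrative assumptions.

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

struct example_ring_mem {
	void *va;
	dma_addr_t dma;
	size_t size;

	void *eqe;			/* event queue entries */
	dma_addr_t eqe_dma;
	void *ctx;			/* per-queue context array */
	dma_addr_t ctx_dma;
};

static int example_ring_mem_init(struct device *dev, struct example_ring_mem *m,
				 size_t eqe_bytes, size_t ctx_bytes)
{
	size_t eqe_sz = ALIGN(eqe_bytes, 32);

	m->size = eqe_sz + ALIGN(ctx_bytes, 32);
	m->va = dma_alloc_coherent(dev, m->size, &m->dma, GFP_KERNEL);
	if (!m->va)
		return -ENOMEM;

	/* a sub-buffer at offset N is va + N on the CPU side and dma + N for the device */
	m->eqe = m->va;
	m->eqe_dma = m->dma;
	m->ctx = m->va + eqe_sz;
	m->ctx_dma = m->dma + eqe_sz;

	return 0;
}

static void example_ring_mem_free(struct device *dev, struct example_ring_mem *m)
{
	dma_free_coherent(dev, m->size, m->va, m->dma);
}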
5588 * hisi_qm_init() - Initialize the configuration of a qm.
5595 struct pci_dev *pdev = qm->pdev; in hisi_qm_init()
5596 struct device *dev = &pdev->dev; in hisi_qm_init()
5607 if (qm->fun_type == QM_HW_PF) { in hisi_qm_init()
5609 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in hisi_qm_init()
5618 if (qm->mode == UACCE_MODE_SVA) { in hisi_qm_init()
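A minimal sketch of the caller's side of hisi_qm_init(): the driver fills a few struct hisi_qm fields in probe before calling it and tears down with hisi_qm_uninit() on the error path. This is not a complete list of required fields, and the values below are illustrative.

static int example_qm_setup(struct hisi_qm *qm, struct pci_dev *pdev)
{
	qm->pdev = pdev;
	qm->fun_type = pdev->is_virtfn ? QM_HW_VF : QM_HW_PF;
	qm->dev_name = "example_acc";		/* hypothetical */
	qm->sqe_size = 128;			/* hypothetical SQE size */

	return hisi_qm_init(qm);
}

/* on remove, or on probe failure after this point: hisi_qm_uninit(qm); */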
5649 * hisi_qm_get_dfx_access() - Try to get dfx access.
5652 * Try to get dfx access; once granted, the user can read debug information.
5659 struct device *dev = &qm->pdev->dev; in hisi_qm_get_dfx_access()
5662 dev_info(dev, "cannot read/write - device is suspended.\n"); in hisi_qm_get_dfx_access()
5663 return -EAGAIN; in hisi_qm_get_dfx_access()
5671 * hisi_qm_put_dfx_access() - Put dfx access.
5674 * Put dfx access, drop runtime PM usage counter.
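A minimal sketch of the bracket debugfs/DFX readers are expected to use around the two helpers above: take dfx access (which resumes a runtime-suspended device) before touching registers and drop it afterwards. The register offset is hypothetical.

static int example_read_dfx_reg(struct hisi_qm *qm, u32 offset, u32 *val)
{
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;			/* e.g. -EAGAIN while suspended */

	*val = readl(qm->io_base + offset);

	hisi_qm_put_dfx_access(qm);
	return 0;
}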
5683 * hisi_qm_pm_init() - Initialize qm runtime PM.
5690 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_init()
5692 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_init()
5702 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5709 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_uninit()
5711 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_uninit()
5721 struct pci_dev *pdev = qm->pdev; in qm_prepare_for_suspend()
5724 ret = qm->ops->set_msi(qm, false); in qm_prepare_for_suspend()
5734 if (qm->err_ini->set_priv_status) { in qm_prepare_for_suspend()
5735 ret = qm->err_ini->set_priv_status(qm); in qm_prepare_for_suspend()
5749 struct pci_dev *pdev = qm->pdev; in qm_rebuild_for_resume()
5758 ret = qm->ops->set_msi(qm, true); in qm_rebuild_for_resume()
5773 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in qm_rebuild_for_resume()
5783 * hisi_qm_suspend() - Runtime suspend of the given device.
5811 * hisi_qm_resume() - Runtime resume of the given device.
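A minimal sketch of how the runtime-PM pieces above fit together in a driver: hisi_qm_pm_init() is called once probe has succeeded and hisi_qm_pm_uninit() before teardown, while hisi_qm_suspend()/hisi_qm_resume() are assumed to have the usual int (*)(struct device *) runtime-PM prototypes and are referenced from dev_pm_ops.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static const struct dev_pm_ops example_acc_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

/*
 * Referenced from the pci_driver: .driver.pm = &example_acc_pm_ops.
 * hisi_qm_pm_init(qm) at the end of probe enables autosuspend;
 * hisi_qm_pm_uninit(qm) in remove undoes it.
 */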