Lines Matching full:qm

382 	struct hisi_qm *qm;  member
398 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
399 void (*qm_db)(struct hisi_qm *qm, u16 qn,
401 int (*debug_init)(struct hisi_qm *qm);
402 void (*hw_error_init)(struct hisi_qm *qm);
403 void (*hw_error_uninit)(struct hisi_qm *qm);
404 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
405 int (*set_msi)(struct hisi_qm *qm, bool set);
408 int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
409 void (*set_ifc_end)(struct hisi_qm *qm);
410 int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
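These members form the per-revision ops table. A minimal sketch of how one revision could populate it, assuming the containing struct is hisi_qm_hw_ops and wiring in the v2/v3 callbacks that appear later in this listing (qm_hw_ops_v3 itself is referenced below in hisi_qm_pre_init()):

	static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
		.get_vft		= qm_get_vft_v2,
		.qm_db			= qm_db_v2,
		.hw_error_init		= qm_hw_error_init_v3,
		.hw_error_uninit	= qm_hw_error_uninit_v3,
		.hw_error_handle	= qm_hw_error_handle_v2,
		.set_msi		= qm_set_msi_v3,
		.set_ifc_begin		= qm_set_ifc_begin_v3,
		.set_ifc_end		= qm_set_ifc_end_v3,
		.get_ifc		= qm_get_ifc_v3,
	};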
469 static void qm_irqs_unregister(struct hisi_qm *qm);
470 static int qm_reset_device(struct hisi_qm *qm);
503 static u32 qm_get_hw_error_status(struct hisi_qm *qm) in qm_get_hw_error_status() argument
505 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_get_hw_error_status()
508 static u32 qm_get_dev_err_status(struct hisi_qm *qm) in qm_get_dev_err_status() argument
510 return qm->err_ini->get_dev_hw_err_status(qm); in qm_get_dev_err_status()
514 static bool qm_check_dev_error(struct hisi_qm *qm) in qm_check_dev_error() argument
516 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in qm_check_dev_error()
532 static int qm_wait_reset_finish(struct hisi_qm *qm) in qm_wait_reset_finish() argument
537 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_wait_reset_finish()
546 static int qm_reset_prepare_ready(struct hisi_qm *qm) in qm_reset_prepare_ready() argument
548 struct pci_dev *pdev = qm->pdev; in qm_reset_prepare_ready()
555 if (qm->ver < QM_HW_V3) in qm_reset_prepare_ready()
558 return qm_wait_reset_finish(qm); in qm_reset_prepare_ready()
561 static void qm_reset_bit_clear(struct hisi_qm *qm) in qm_reset_bit_clear() argument
563 struct pci_dev *pdev = qm->pdev; in qm_reset_bit_clear()
566 if (qm->ver < QM_HW_V3) in qm_reset_bit_clear()
569 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_reset_bit_clear()
585 int hisi_qm_wait_mb_ready(struct hisi_qm *qm) in hisi_qm_wait_mb_ready() argument
589 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, in hisi_qm_wait_mb_ready()
596 static void qm_mb_write(struct hisi_qm *qm, const void *src) in qm_mb_write() argument
598 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; in qm_mb_write()
622 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) in qm_mb_nolock() argument
627 if (unlikely(hisi_qm_wait_mb_ready(qm))) { in qm_mb_nolock()
628 dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start operation!\n"); in qm_mb_nolock()
633 qm_mb_write(qm, mailbox); in qm_mb_nolock()
635 if (unlikely(hisi_qm_wait_mb_ready(qm))) { in qm_mb_nolock()
636 dev_err(&qm->pdev->dev, "QM mailbox operation timed out!\n"); in qm_mb_nolock()
641 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); in qm_mb_nolock()
643 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); in qm_mb_nolock()
651 atomic64_inc(&qm->debug.dfx.mb_err_cnt); in qm_mb_nolock()
655 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, in hisi_qm_mb() argument
663 mutex_lock(&qm->mailbox_lock); in hisi_qm_mb()
664 ret = qm_mb_nolock(qm, &mailbox); in hisi_qm_mb()
665 mutex_unlock(&qm->mailbox_lock); in hisi_qm_mb()
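hisi_qm_mb() is the locked wrapper around qm_mb_nolock(). A typical call, as __hisi_qm_start() below uses it to program the SQC base table:

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret)
		return ret;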
672 int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) in qm_set_and_get_xqc() argument
683 tmp_xqc = qm->xqc_buf.sqc; in qm_set_and_get_xqc()
684 xqc_dma = qm->xqc_buf.sqc_dma; in qm_set_and_get_xqc()
688 tmp_xqc = qm->xqc_buf.cqc; in qm_set_and_get_xqc()
689 xqc_dma = qm->xqc_buf.cqc_dma; in qm_set_and_get_xqc()
693 tmp_xqc = qm->xqc_buf.eqc; in qm_set_and_get_xqc()
694 xqc_dma = qm->xqc_buf.eqc_dma; in qm_set_and_get_xqc()
698 tmp_xqc = qm->xqc_buf.aeqc; in qm_set_and_get_xqc()
699 xqc_dma = qm->xqc_buf.aeqc_dma; in qm_set_and_get_xqc()
702 dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd); in qm_set_and_get_xqc()
707 if (qm_check_dev_error(qm)) { in qm_set_and_get_xqc()
708 dev_err(&qm->pdev->dev, "failed to send mailbox command since qm is stopped!\n"); in qm_set_and_get_xqc()
712 mutex_lock(&qm->mailbox_lock); in qm_set_and_get_xqc()
717 ret = qm_mb_nolock(qm, &mailbox); in qm_set_and_get_xqc()
721 mutex_unlock(&qm->mailbox_lock); in qm_set_and_get_xqc()
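qm_set_and_get_xqc() moves one context (SQC/CQC/EQC/AEQC) between the driver and hardware through the reserved xqc_buf bounce buffers. A sketch of both directions, mirroring qm_sq_ctx_cfg() and qm_wait_qp_empty() below; the 0 = write, 1 = read meaning of the last argument is inferred from those call sites:

	/* program the SQC of qp_id into hardware */
	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);

	/* read the live SQC back to inspect queue state */
	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);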
726 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db_v1() argument
734 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); in qm_db_v1()
737 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db_v2() argument
739 void __iomem *io_base = qm->io_base; in qm_db_v2()
744 io_base = qm->db_io_base + (u64)qn * qm->db_interval + in qm_db_v2()
757 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db() argument
759 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", in qm_db()
762 qm->ops->qm_db(qm, qn, cmd, index, priority); in qm_db()
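qm_db() dispatches to the revision-specific doorbell writer. Two call shapes recur in this file, both copied from call sites below: ringing the SQ tail after queuing an sqe, and acknowledging CQ consumption (the final argument appears, from qm_poll_req_cb(), to re-arm the completion interrupt when set to 1):

	qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);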
765 static void qm_disable_clock_gate(struct hisi_qm *qm) in qm_disable_clock_gate() argument
769 /* if the qm enables clock gating on Kunpeng930, qos will be inaccurate. */ in qm_disable_clock_gate()
770 if (qm->ver < QM_HW_V3) in qm_disable_clock_gate()
773 val = readl(qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
775 writel(val, qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
778 static int qm_dev_mem_reset(struct hisi_qm *qm) in qm_dev_mem_reset() argument
782 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
783 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, in qm_dev_mem_reset()
790 * @qm: The qm from which to get the information.
797 u32 hisi_qm_get_hw_info(struct hisi_qm *qm, in hisi_qm_get_hw_info() argument
803 switch (qm->ver) { in hisi_qm_get_hw_info()
812 val = readl(qm->io_base + info_table[index].offset); in hisi_qm_get_hw_info()
818 u32 hisi_qm_get_cap_value(struct hisi_qm *qm, in hisi_qm_get_cap_value() argument
824 switch (qm->ver) { in hisi_qm_get_cap_value()
833 val = readl(qm->io_base + info_table[index].offset); in hisi_qm_get_cap_value()
839 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, in qm_get_xqc_depth() argument
844 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); in qm_get_xqc_depth()
849 int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, in hisi_qm_set_algs() argument
852 struct device *dev = &qm->pdev->dev; in hisi_qm_set_algs()
856 if (!qm->uacce) in hisi_qm_set_algs()
877 qm->uacce->algs = algs; in hisi_qm_set_algs()
883 static u32 qm_get_irq_num(struct hisi_qm *qm) in qm_get_irq_num() argument
885 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num()
886 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
888 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
891 static int qm_pm_get_sync(struct hisi_qm *qm) in qm_pm_get_sync() argument
893 struct device *dev = &qm->pdev->dev; in qm_pm_get_sync()
896 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_get_sync()
908 static void qm_pm_put_sync(struct hisi_qm *qm) in qm_pm_put_sync() argument
910 struct device *dev = &qm->pdev->dev; in qm_pm_put_sync()
912 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_put_sync()
932 struct hisi_qm *qm = qp->qm; in qm_poll_req_cb() local
936 qp->req_cb(qp, qp->sqe + qm->sqe_size * in qm_poll_req_cb()
940 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_req_cb()
948 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); in qm_poll_req_cb()
955 struct hisi_qm *qm = poll_data->qm; in qm_work_process() local
961 qp = &qm->qp_array[poll_data->qp_finish_id[i]]; in qm_work_process()
975 static void qm_get_complete_eqe_num(struct hisi_qm *qm) in qm_get_complete_eqe_num() argument
977 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_get_complete_eqe_num()
979 u16 eq_depth = qm->eq_depth; in qm_get_complete_eqe_num()
982 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { in qm_get_complete_eqe_num()
983 atomic64_inc(&qm->debug.dfx.err_irq_cnt); in qm_get_complete_eqe_num()
984 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
989 if (unlikely(cqn >= qm->qp_num)) in qm_get_complete_eqe_num()
991 poll_data = &qm->poll_data[cqn]; in qm_get_complete_eqe_num()
993 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { in qm_get_complete_eqe_num()
998 if (qm->status.eq_head == eq_depth - 1) { in qm_get_complete_eqe_num()
999 qm->status.eqc_phase = !qm->status.eqc_phase; in qm_get_complete_eqe_num()
1000 eqe = qm->eqe; in qm_get_complete_eqe_num()
1001 qm->status.eq_head = 0; in qm_get_complete_eqe_num()
1004 qm->status.eq_head++; in qm_get_complete_eqe_num()
1012 queue_work(qm->wq, &poll_data->work); in qm_get_complete_eqe_num()
1013 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
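Since this listing only keeps lines containing qm, the EQ walk above is fragmentary. Reassembled as a sketch, the phase-bit convention is: an EQE is valid only while its phase bit matches the software copy, and the copy flips on each wrap so entries left over from the previous lap compare stale (the eqe++ advance is assumed, being one of the elided lines):

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		/* ... dispatch the cqn to its poll_data worker ... */
		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;		/* wrap to ring base */
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}
	}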
1018 struct hisi_qm *qm = data; in qm_eq_irq() local
1021 qm_get_complete_eqe_num(qm); in qm_eq_irq()
1028 struct hisi_qm *qm = data; in qm_mb_cmd_irq() local
1031 val = readl(qm->io_base + QM_IFC_INT_STATUS); in qm_mb_cmd_irq()
1036 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { in qm_mb_cmd_irq()
1037 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); in qm_mb_cmd_irq()
1041 schedule_work(&qm->cmd_process); in qm_mb_cmd_irq()
1060 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id) in qm_disable_qp() argument
1062 struct hisi_qp *qp = &qm->qp_array[qp_id]; in qm_disable_qp()
1069 static void qm_reset_function(struct hisi_qm *qm) in qm_reset_function() argument
1071 struct device *dev = &qm->pdev->dev; in qm_reset_function()
1074 if (qm_check_dev_error(qm)) in qm_reset_function()
1077 ret = qm_reset_prepare_ready(qm); in qm_reset_function()
1083 ret = hisi_qm_stop(qm, QM_DOWN); in qm_reset_function()
1085 dev_err(dev, "failed to stop qm during function reset\n"); in qm_reset_function()
1089 ret = hisi_qm_start(qm); in qm_reset_function()
1091 dev_err(dev, "failed to start qm during function reset\n"); in qm_reset_function()
1094 qm_reset_bit_clear(qm); in qm_reset_function()
1099 struct hisi_qm *qm = data; in qm_aeq_thread() local
1100 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; in qm_aeq_thread()
1101 u16 aeq_depth = qm->aeq_depth; in qm_aeq_thread()
1104 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); in qm_aeq_thread()
1106 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { in qm_aeq_thread()
1113 dev_err(&qm->pdev->dev, "eq overflow, resetting function\n"); in qm_aeq_thread()
1114 qm_reset_function(qm); in qm_aeq_thread()
1117 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n", in qm_aeq_thread()
1121 qm_disable_qp(qm, qp_id); in qm_aeq_thread()
1124 dev_err(&qm->pdev->dev, "unknown error type %u\n", in qm_aeq_thread()
1129 if (qm->status.aeq_head == aeq_depth - 1) { in qm_aeq_thread()
1130 qm->status.aeqc_phase = !qm->status.aeqc_phase; in qm_aeq_thread()
1131 aeqe = qm->aeqe; in qm_aeq_thread()
1132 qm->status.aeq_head = 0; in qm_aeq_thread()
1135 qm->status.aeq_head++; in qm_aeq_thread()
1139 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_thread()
1154 static void qm_init_prefetch(struct hisi_qm *qm) in qm_init_prefetch() argument
1156 struct device *dev = &qm->pdev->dev; in qm_init_prefetch()
1159 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in qm_init_prefetch()
1177 writel(page_type, qm->io_base + QM_PAGE_SIZE); in qm_init_prefetch()
1247 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, in qm_vft_data_cfg() argument
1255 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1268 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1289 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); in qm_vft_data_cfg()
1290 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); in qm_vft_data_cfg()
1293 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, in qm_set_vft_common() argument
1300 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in qm_set_vft_common()
1301 factor = &qm->factor[fun_num]; in qm_set_vft_common()
1303 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1309 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
1310 writel(type, qm->io_base + QM_VFT_CFG_TYPE); in qm_set_vft_common()
1314 writel(fun_num, qm->io_base + QM_VFT_CFG); in qm_set_vft_common()
1316 qm_vft_data_cfg(qm, type, base, number, factor); in qm_set_vft_common()
1318 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
1319 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
1321 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1326 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) in qm_shaper_init_vft() argument
1328 u32 qos = qm->factor[fun_num].func_qos; in qm_shaper_init_vft()
1331 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); in qm_shaper_init_vft()
1333 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); in qm_shaper_init_vft()
1336 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); in qm_shaper_init_vft()
1339 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); in qm_shaper_init_vft()
1348 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, in qm_set_sqc_cqc_vft() argument
1354 ret = qm_set_vft_common(qm, i, fun_num, base, number); in qm_set_sqc_cqc_vft()
1360 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in qm_set_sqc_cqc_vft()
1361 ret = qm_shaper_init_vft(qm, fun_num); in qm_set_sqc_cqc_vft()
1369 qm_set_vft_common(qm, i, fun_num, 0, 0); in qm_set_sqc_cqc_vft()
1374 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) in qm_get_vft_v2() argument
1379 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); in qm_get_vft_v2()
1383 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft_v2()
1384 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_vft_v2()
1392 static void qm_hw_error_init_v1(struct hisi_qm *qm) in qm_hw_error_init_v1() argument
1394 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v1()
1397 static void qm_hw_error_cfg(struct hisi_qm *qm) in qm_hw_error_cfg() argument
1399 struct hisi_qm_err_info *err_info = &qm->err_info; in qm_hw_error_cfg()
1401 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; in qm_hw_error_cfg()
1402 /* clear QM hw residual error source */ in qm_hw_error_cfg()
1403 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_cfg()
1406 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_cfg()
1407 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); in qm_hw_error_cfg()
1408 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_cfg()
1409 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); in qm_hw_error_cfg()
1412 static void qm_hw_error_init_v2(struct hisi_qm *qm) in qm_hw_error_init_v2() argument
1416 qm_hw_error_cfg(qm); in qm_hw_error_init_v2()
1418 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v2()
1419 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1420 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1423 static void qm_hw_error_uninit_v2(struct hisi_qm *qm) in qm_hw_error_uninit_v2() argument
1425 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v2()
1427 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1428 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1431 static void qm_hw_error_init_v3(struct hisi_qm *qm) in qm_hw_error_init_v3() argument
1435 qm_hw_error_cfg(qm); in qm_hw_error_init_v3()
1438 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_init_v3()
1440 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v3()
1441 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1442 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1445 static void qm_hw_error_uninit_v3(struct hisi_qm *qm) in qm_hw_error_uninit_v3() argument
1447 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v3()
1449 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1450 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1453 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_uninit_v3()
1456 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) in qm_log_hw_error() argument
1459 struct device *dev = &qm->pdev->dev; in qm_log_hw_error()
1472 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); in qm_log_hw_error()
1477 dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n", in qm_log_hw_error()
1480 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); in qm_log_hw_error()
1486 dev_err(dev, "qm %s fifo overflow in function %u qp %u\n", in qm_log_hw_error()
1491 reg_val = readl(qm->io_base + QM_ABNORMAL_INF02); in qm_log_hw_error()
1493 dev_err(dev, "qm axi poison error happened\n"); in qm_log_hw_error()
1498 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) in qm_hw_error_handle_v2() argument
1502 error_status = qm_get_hw_error_status(qm); in qm_hw_error_handle_v2()
1503 if (error_status & qm->error_mask) { in qm_hw_error_handle_v2()
1505 qm->err_status.is_qm_ecc_mbit = true; in qm_hw_error_handle_v2()
1507 qm_log_hw_error(qm, error_status); in qm_hw_error_handle_v2()
1508 if (error_status & qm->err_info.qm_reset_mask) { in qm_hw_error_handle_v2()
1510 writel(qm->err_info.nfe & (~error_status), in qm_hw_error_handle_v2()
1511 qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
1516 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_handle_v2()
1517 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
1518 writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_handle_v2()
1524 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) in qm_get_mb_cmd() argument
1530 mutex_lock(&qm->mailbox_lock); in qm_get_mb_cmd()
1531 ret = qm_mb_nolock(qm, &mailbox); in qm_get_mb_cmd()
1535 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_mb_cmd()
1536 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_mb_cmd()
1539 mutex_unlock(&qm->mailbox_lock); in qm_get_mb_cmd()
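The 64-bit message above is assembled from a lo/hi pair of 32-bit registers, a pattern this file repeats in qm_get_vft_v2() and qm_get_shaper_vft_qos(). A hypothetical helper (not in the driver) capturing it:

	static u64 qm_read_split_u64(struct hisi_qm *qm, u32 lo_off, u32 hi_off)
	{
		return readl(qm->io_base + lo_off) |
		       ((u64)readl(qm->io_base + hi_off) << 32);
	}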
1543 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) in qm_clear_cmd_interrupt() argument
1547 if (qm->fun_type == QM_HW_PF) in qm_clear_cmd_interrupt()
1548 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); in qm_clear_cmd_interrupt()
1550 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1552 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1555 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) in qm_handle_vf_msg() argument
1557 struct device *dev = &qm->pdev->dev; in qm_handle_vf_msg()
1561 ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id); in qm_handle_vf_msg()
1583 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm) in qm_wait_vf_prepare_finish() argument
1585 struct device *dev = &qm->pdev->dev; in qm_wait_vf_prepare_finish()
1586 u32 vfs_num = qm->vfs_num; in qm_wait_vf_prepare_finish()
1592 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_wait_vf_prepare_finish()
1596 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_wait_vf_prepare_finish()
1612 qm_handle_vf_msg(qm, i); in qm_wait_vf_prepare_finish()
1618 qm_clear_cmd_interrupt(qm, val); in qm_wait_vf_prepare_finish()
1623 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num) in qm_trigger_vf_interrupt() argument
1627 val = readl(qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1630 writel(val, qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1632 val = readl(qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1634 writel(val, qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1637 static void qm_trigger_pf_interrupt(struct hisi_qm *qm) in qm_trigger_pf_interrupt() argument
1641 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1643 writel(val, qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1646 static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num) in qm_ping_single_vf() argument
1648 struct device *dev = &qm->pdev->dev; in qm_ping_single_vf()
1653 ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num); in qm_ping_single_vf()
1659 qm_trigger_vf_interrupt(qm, fun_num); in qm_ping_single_vf()
1662 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_single_vf()
1675 qm->ops->set_ifc_end(qm); in qm_ping_single_vf()
1679 static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd) in qm_ping_all_vfs() argument
1681 struct device *dev = &qm->pdev->dev; in qm_ping_all_vfs()
1682 u32 vfs_num = qm->vfs_num; in qm_ping_all_vfs()
1688 ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS); in qm_ping_all_vfs()
1691 qm->ops->set_ifc_end(qm); in qm_ping_all_vfs()
1695 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS); in qm_ping_all_vfs()
1698 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_all_vfs()
1701 qm->ops->set_ifc_end(qm); in qm_ping_all_vfs()
1709 qm->ops->set_ifc_end(qm); in qm_ping_all_vfs()
1720 static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd) in qm_ping_pf() argument
1726 ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0); in qm_ping_pf()
1728 dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd); in qm_ping_pf()
1732 qm_trigger_pf_interrupt(qm); in qm_ping_pf()
1736 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_ping_pf()
1747 qm->ops->set_ifc_end(qm); in qm_ping_pf()
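qm_ping_pf() is the VF-side half of the IFC handshake; qm_vf_read_qos() below drives it like this:

	ret = qm_ping_pf(qm, QM_VF_GET_QOS);
	if (ret)
		pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");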
1752 static int qm_drain_qm(struct hisi_qm *qm) in qm_drain_qm() argument
1754 return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0); in qm_drain_qm()
1759 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); in qm_stop_qp()
1762 static int qm_set_msi(struct hisi_qm *qm, bool set) in qm_set_msi() argument
1764 struct pci_dev *pdev = qm->pdev; in qm_set_msi()
1772 if (qm->err_status.is_qm_ecc_mbit || in qm_set_msi()
1773 qm->err_status.is_dev_ecc_mbit) in qm_set_msi()
1777 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) in qm_set_msi()
1784 static void qm_wait_msi_finish(struct hisi_qm *qm) in qm_wait_msi_finish() argument
1786 struct pci_dev *pdev = qm->pdev; in qm_wait_msi_finish()
1806 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, in qm_wait_msi_finish()
1812 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, in qm_wait_msi_finish()
1819 static int qm_set_msi_v3(struct hisi_qm *qm, bool set) in qm_set_msi_v3() argument
1821 struct pci_dev *pdev = qm->pdev; in qm_set_msi_v3()
1842 qm_wait_msi_finish(qm); in qm_set_msi_v3()
1849 static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num) in qm_set_ifc_begin_v3() argument
1857 mutex_lock(&qm->mailbox_lock); in qm_set_ifc_begin_v3()
1858 return qm_mb_nolock(qm, &mailbox); in qm_set_ifc_begin_v3()
1861 static void qm_set_ifc_end_v3(struct hisi_qm *qm) in qm_set_ifc_end_v3() argument
1863 mutex_unlock(&qm->mailbox_lock); in qm_set_ifc_end_v3()
1866 static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num) in qm_get_ifc_v3() argument
1871 ret = qm_get_mb_cmd(qm, &msg, fun_num); in qm_get_ifc_v3()
1883 static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num) in qm_set_ifc_begin_v4() argument
1888 if (qm->fun_type == QM_HW_PF) in qm_set_ifc_begin_v4()
1895 mutex_lock(&qm->ifc_lock); in qm_set_ifc_begin_v4()
1896 writeq(msg, qm->io_base + offset); in qm_set_ifc_begin_v4()
1901 static void qm_set_ifc_end_v4(struct hisi_qm *qm) in qm_set_ifc_end_v4() argument
1903 mutex_unlock(&qm->ifc_lock); in qm_set_ifc_end_v4()
1906 static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num) in qm_get_ifc_pf() argument
1912 return (u64)readl(qm->io_base + offset); in qm_get_ifc_pf()
1915 static u64 qm_get_ifc_vf(struct hisi_qm *qm) in qm_get_ifc_vf() argument
1917 return readq(qm->io_base + QM_PF2VF_VF_R); in qm_get_ifc_vf()
1920 static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num) in qm_get_ifc_v4() argument
1924 if (qm->fun_type == QM_HW_PF) in qm_get_ifc_v4()
1925 msg = qm_get_ifc_pf(qm, fun_num); in qm_get_ifc_v4()
1927 msg = qm_get_ifc_vf(qm); in qm_get_ifc_v4()
1984 return qp->sqe + sq_tail * qp->qm->sqe_size; in qm_get_avail_sqe()
1996 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) in qm_create_qp_nolock() argument
1998 struct device *dev = &qm->pdev->dev; in qm_create_qp_nolock()
2002 if (atomic_read(&qm->status.flags) == QM_STOP) { in qm_create_qp_nolock()
2003 dev_info_ratelimited(dev, "failed to create qp as qm is stopped!\n"); in qm_create_qp_nolock()
2007 if (qm->qp_in_used == qm->qp_num) { in qm_create_qp_nolock()
2008 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", in qm_create_qp_nolock()
2009 qm->qp_num); in qm_create_qp_nolock()
2010 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
2014 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
2016 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", in qm_create_qp_nolock()
2017 qm->qp_num); in qm_create_qp_nolock()
2018 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
2022 qp = &qm->qp_array[qp_id]; in qm_create_qp_nolock()
2031 qm->qp_in_used++; in qm_create_qp_nolock()
2037 * hisi_qm_create_qp() - Create a queue pair from qm.
2038 * @qm: The qm we create a qp from.
2043 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) in hisi_qm_create_qp() argument
2048 ret = qm_pm_get_sync(qm); in hisi_qm_create_qp()
2052 down_write(&qm->qps_lock); in hisi_qm_create_qp()
2053 qp = qm_create_qp_nolock(qm, alg_type); in hisi_qm_create_qp()
2054 up_write(&qm->qps_lock); in hisi_qm_create_qp()
2057 qm_pm_put_sync(qm); in hisi_qm_create_qp()
2063 * hisi_qm_release_qp() - Release a qp back to its qm.
2070 struct hisi_qm *qm = qp->qm; in hisi_qm_release_qp() local
2072 down_write(&qm->qps_lock); in hisi_qm_release_qp()
2074 qm->qp_in_used--; in hisi_qm_release_qp()
2075 idr_remove(&qm->qp_idr, qp->qp_id); in hisi_qm_release_qp()
2077 up_write(&qm->qps_lock); in hisi_qm_release_qp()
2079 qm_pm_put_sync(qm); in hisi_qm_release_qp()
2084 struct hisi_qm *qm = qp->qm; in qm_sq_ctx_cfg() local
2085 enum qm_hw_ver ver = qm->ver; in qm_sq_ctx_cfg()
2089 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_sq_ctx_cfg()
2092 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); in qm_sq_ctx_cfg()
2101 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_sq_ctx_cfg()
2105 return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0); in qm_sq_ctx_cfg()
2110 struct hisi_qm *qm = qp->qm; in qm_cq_ctx_cfg() local
2111 enum qm_hw_ver ver = qm->ver; in qm_cq_ctx_cfg()
2131 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_cq_ctx_cfg()
2134 return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0); in qm_cq_ctx_cfg()
2152 struct hisi_qm *qm = qp->qm; in qm_start_qp_nolock() local
2153 struct device *dev = &qm->pdev->dev; in qm_start_qp_nolock()
2158 if (atomic_read(&qm->status.flags) == QM_STOP) { in qm_start_qp_nolock()
2159 dev_info_ratelimited(dev, "failed to start qp as qm is stopped!\n"); in qm_start_qp_nolock()
2183 struct hisi_qm *qm = qp->qm; in hisi_qm_start_qp() local
2186 down_write(&qm->qps_lock); in hisi_qm_start_qp()
2188 up_write(&qm->qps_lock); in hisi_qm_start_qp()
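Taken together with hisi_qm_create_qp() and hisi_qm_release_qp() above, the in-kernel qp lifecycle sketches out as below; the second argument of hisi_qm_start_qp() is assumed to be the pasid, with 0 for kernel-mode users:

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	ret = hisi_qm_start_qp(qp, 0);
	if (ret) {
		hisi_qm_release_qp(qp);
		return ret;
	}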
2206 struct hisi_qm *qm = qp->qm; in qp_stop_fail_cb() local
2212 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); in qp_stop_fail_cb()
2217 static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id) in qm_wait_qp_empty() argument
2219 struct device *dev = &qm->pdev->dev; in qm_wait_qp_empty()
2225 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); in qm_wait_qp_empty()
2232 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1); in qm_wait_qp_empty()
2265 struct hisi_qm *qm = qp->qm; in qm_drain_qp() local
2270 if (qm_check_dev_error(qm)) in qm_drain_qp()
2274 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { in qm_drain_qp()
2277 dev_err(&qm->pdev->dev, "Failed to stop qp!\n"); in qm_drain_qp()
2284 ret = qm_wait_qp_empty(qm, &state, qp->qp_id); in qm_drain_qp()
2291 if (qm->debug.dev_dfx.dev_timeout) in qm_drain_qp()
2292 qm->debug.dev_dfx.dev_state = state; in qm_drain_qp()
2299 struct hisi_qm *qm = qp->qm; in qm_stop_qp_nolock() local
2300 struct device *dev = &qm->pdev->dev; in qm_stop_qp_nolock()
2317 if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) { in qm_stop_qp_nolock()
2323 flush_workqueue(qm->wq); in qm_stop_qp_nolock()
2331 * hisi_qm_stop_qp() - Stop a qp in qm.
2338 down_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2340 up_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2350 * if the qp's related qm is resetting.
2355 * causes the current qm_db send to fail or the sent sqe to go unreceived. QM
2367 atomic_read(&qp->qm->status.flags) == QM_STOP || in hisi_qp_send()
2369 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); in hisi_qp_send()
2376 memcpy(sqe, msg, qp->qm->sqe_size); in hisi_qp_send()
2378 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
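A sketch of the submit/complete round trip from a caller's perspective; the handler name is illustrative, and req_cb is invoked from qm_poll_req_cb() above once the matching cqe arrives:

	qp->req_cb = my_req_done;	/* hypothetical completion handler */

	ret = hisi_qp_send(qp, msg);	/* msg must be qm->sqe_size bytes */
	if (ret)
		return ret;		/* qp stopped, resetting, or sq full */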
2386 static void hisi_qm_cache_wb(struct hisi_qm *qm) in hisi_qm_cache_wb() argument
2390 if (qm->ver == QM_HW_V1) in hisi_qm_cache_wb()
2393 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2394 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in hisi_qm_cache_wb()
2397 dev_err(&qm->pdev->dev, "QM sqc cache writeback failed!\n"); in hisi_qm_cache_wb()
2405 /* This function returns the number of free qps in the qm. */
2408 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_available_instances() local
2411 down_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2412 ret = qm->qp_num - qm->qp_in_used; in hisi_qm_get_available_instances()
2413 up_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2418 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) in hisi_qm_set_hw_reset() argument
2422 for (i = 0; i < qm->qp_num; i++) in hisi_qm_set_hw_reset()
2423 qm_set_qp_disable(&qm->qp_array[i], offset); in hisi_qm_set_hw_reset()
2430 struct hisi_qm *qm = uacce->priv; in hisi_qm_uacce_get_queue() local
2434 qp = hisi_qm_create_qp(qm, alg_type); in hisi_qm_uacce_get_queue()
2461 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_mmap() local
2462 resource_size_t phys_base = qm->db_phys_base + in hisi_qm_uacce_mmap()
2463 qp->qp_id * qm->db_interval; in hisi_qm_uacce_mmap()
2465 struct pci_dev *pdev = qm->pdev; in hisi_qm_uacce_mmap()
2472 if (qm->ver == QM_HW_V1) { in hisi_qm_uacce_mmap()
2475 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in hisi_qm_uacce_mmap()
2480 if (sz > qm->db_interval) in hisi_qm_uacce_mmap()
2519 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_stop_queue() local
2520 struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx; in hisi_qm_uacce_stop_queue()
2537 dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n", in hisi_qm_uacce_stop_queue()
2564 struct hisi_qm *qm = q->uacce->priv; in qm_set_sqctype() local
2567 down_write(&qm->qps_lock); in qm_set_sqctype()
2569 up_write(&qm->qps_lock); in qm_set_sqctype()
2600 qp_info.sqe_size = qp->qm->sqe_size; in hisi_qm_uacce_ioctl()
2617 * @qm: the qm whose uacce device may be isolated
2619 static int qm_hw_err_isolate(struct hisi_qm *qm) in qm_hw_err_isolate() argument
2625 isolate = &qm->isolate_data; in qm_hw_err_isolate()
2630 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) in qm_hw_err_isolate()
2663 static void qm_hw_err_destroy(struct hisi_qm *qm) in qm_hw_err_destroy() argument
2667 mutex_lock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2668 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { in qm_hw_err_destroy()
2672 mutex_unlock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2677 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_isolate_state() local
2681 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_get_isolate_state()
2683 pf_qm = qm; in hisi_qm_get_isolate_state()
2691 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_write() local
2697 if (qm->isolate_data.is_isolate) in hisi_qm_isolate_threshold_write()
2700 qm->isolate_data.err_threshold = num; in hisi_qm_isolate_threshold_write()
2703 qm_hw_err_destroy(qm); in hisi_qm_isolate_threshold_write()
2710 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_read() local
2714 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_isolate_threshold_read()
2718 return qm->isolate_data.err_threshold; in hisi_qm_isolate_threshold_read()
2735 static void qm_remove_uacce(struct hisi_qm *qm) in qm_remove_uacce() argument
2737 struct uacce_device *uacce = qm->uacce; in qm_remove_uacce()
2739 if (qm->use_sva) { in qm_remove_uacce()
2740 qm_hw_err_destroy(qm); in qm_remove_uacce()
2742 qm->uacce = NULL; in qm_remove_uacce()
2746 static int qm_alloc_uacce(struct hisi_qm *qm) in qm_alloc_uacce() argument
2748 struct pci_dev *pdev = qm->pdev; in qm_alloc_uacce()
2769 qm->use_sva = true; in qm_alloc_uacce()
2772 qm_remove_uacce(qm); in qm_alloc_uacce()
2777 uacce->priv = qm; in qm_alloc_uacce()
2779 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2781 else if (qm->ver == QM_HW_V2) in qm_alloc_uacce()
2786 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2788 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_alloc_uacce()
2792 mmio_page_nr = qm->db_interval / PAGE_SIZE; in qm_alloc_uacce()
2794 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); in qm_alloc_uacce()
2797 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + in qm_alloc_uacce()
2804 qm->uacce = uacce; in qm_alloc_uacce()
2805 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); in qm_alloc_uacce()
2806 mutex_init(&qm->isolate_data.isolate_lock); in qm_alloc_uacce()
2812 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2813 * there are users on the QM, return failure without doing anything.
2814 * @qm: The qm to be frozen.
2816 * This function freezes the QM, after which SRIOV can be disabled.
2818 static int qm_frozen(struct hisi_qm *qm) in qm_frozen() argument
2820 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) in qm_frozen()
2823 down_write(&qm->qps_lock); in qm_frozen()
2825 if (!qm->qp_in_used) { in qm_frozen()
2826 qm->qp_in_used = qm->qp_num; in qm_frozen()
2827 up_write(&qm->qps_lock); in qm_frozen()
2828 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); in qm_frozen()
2832 up_write(&qm->qps_lock); in qm_frozen()
2840 struct hisi_qm *qm, *vf_qm; in qm_try_frozen_vfs() local
2849 list_for_each_entry(qm, &qm_list->list, list) { in qm_try_frozen_vfs()
2850 dev = qm->pdev; in qm_try_frozen_vfs()
2870 * @qm: The qm whose tasks we wait to finish.
2873 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) in hisi_qm_wait_task_finish() argument
2875 while (qm_frozen(qm) || in hisi_qm_wait_task_finish()
2876 ((qm->fun_type == QM_HW_PF) && in hisi_qm_wait_task_finish()
2877 qm_try_frozen_vfs(qm->pdev, qm_list))) { in hisi_qm_wait_task_finish()
2881 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || in hisi_qm_wait_task_finish()
2882 test_bit(QM_RESETTING, &qm->misc_ctl)) in hisi_qm_wait_task_finish()
2885 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_wait_task_finish()
2886 flush_work(&qm->cmd_process); in hisi_qm_wait_task_finish()
2892 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) in hisi_qp_memory_uninit() argument
2894 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_uninit()
2899 qdma = &qm->qp_array[i].qdma; in hisi_qp_memory_uninit()
2901 kfree(qm->poll_data[i].qp_finish_id); in hisi_qp_memory_uninit()
2904 kfree(qm->poll_data); in hisi_qp_memory_uninit()
2905 kfree(qm->qp_array); in hisi_qp_memory_uninit()
2908 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, in hisi_qp_memory_init() argument
2911 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_init()
2912 size_t off = qm->sqe_size * sq_depth; in hisi_qp_memory_init()
2916 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), in hisi_qp_memory_init()
2918 if (!qm->poll_data[id].qp_finish_id) in hisi_qp_memory_init()
2921 qp = &qm->qp_array[id]; in hisi_qp_memory_init()
2934 qp->qm = qm; in hisi_qp_memory_init()
2940 kfree(qm->poll_data[id].qp_finish_id); in hisi_qp_memory_init()
2944 static void hisi_qm_pre_init(struct hisi_qm *qm) in hisi_qm_pre_init() argument
2946 struct pci_dev *pdev = qm->pdev; in hisi_qm_pre_init()
2948 if (qm->ver == QM_HW_V1) in hisi_qm_pre_init()
2949 qm->ops = &qm_hw_ops_v1; in hisi_qm_pre_init()
2950 else if (qm->ver == QM_HW_V2) in hisi_qm_pre_init()
2951 qm->ops = &qm_hw_ops_v2; in hisi_qm_pre_init()
2952 else if (qm->ver == QM_HW_V3) in hisi_qm_pre_init()
2953 qm->ops = &qm_hw_ops_v3; in hisi_qm_pre_init()
2955 qm->ops = &qm_hw_ops_v4; in hisi_qm_pre_init()
2957 pci_set_drvdata(pdev, qm); in hisi_qm_pre_init()
2958 mutex_init(&qm->mailbox_lock); in hisi_qm_pre_init()
2959 mutex_init(&qm->ifc_lock); in hisi_qm_pre_init()
2960 init_rwsem(&qm->qps_lock); in hisi_qm_pre_init()
2961 qm->qp_in_used = 0; in hisi_qm_pre_init()
2962 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { in hisi_qm_pre_init()
2968 static void qm_cmd_uninit(struct hisi_qm *qm) in qm_cmd_uninit() argument
2972 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_uninit()
2975 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2977 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2980 static void qm_cmd_init(struct hisi_qm *qm) in qm_cmd_init() argument
2984 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_init()
2988 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); in qm_cmd_init()
2991 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2993 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2996 static void qm_put_pci_res(struct hisi_qm *qm) in qm_put_pci_res() argument
2998 struct pci_dev *pdev = qm->pdev; in qm_put_pci_res()
3000 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_put_pci_res()
3001 iounmap(qm->db_io_base); in qm_put_pci_res()
3003 iounmap(qm->io_base); in qm_put_pci_res()
3007 static void hisi_qm_pci_uninit(struct hisi_qm *qm) in hisi_qm_pci_uninit() argument
3009 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_uninit()
3012 qm_put_pci_res(qm); in hisi_qm_pci_uninit()
3016 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) in hisi_qm_set_state() argument
3018 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) in hisi_qm_set_state()
3019 writel(state, qm->io_base + QM_VF_STATE); in hisi_qm_set_state()
3022 static void hisi_qm_unint_work(struct hisi_qm *qm) in hisi_qm_unint_work() argument
3024 destroy_workqueue(qm->wq); in hisi_qm_unint_work()
3027 static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) in hisi_qm_free_rsv_buf() argument
3029 struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; in hisi_qm_free_rsv_buf()
3030 struct device *dev = &qm->pdev->dev; in hisi_qm_free_rsv_buf()
3035 static void hisi_qm_memory_uninit(struct hisi_qm *qm) in hisi_qm_memory_uninit() argument
3037 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_uninit()
3039 hisi_qp_memory_uninit(qm, qm->qp_num); in hisi_qm_memory_uninit()
3040 hisi_qm_free_rsv_buf(qm); in hisi_qm_memory_uninit()
3041 if (qm->qdma.va) { in hisi_qm_memory_uninit()
3042 hisi_qm_cache_wb(qm); in hisi_qm_memory_uninit()
3043 dma_free_coherent(dev, qm->qdma.size, in hisi_qm_memory_uninit()
3044 qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_uninit()
3047 idr_destroy(&qm->qp_idr); in hisi_qm_memory_uninit()
3049 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_uninit()
3050 kfree(qm->factor); in hisi_qm_memory_uninit()
3054 * hisi_qm_uninit() - Uninitialize qm.
3055 * @qm: The qm to be uninitialized.
3057 * This function releases qm-related device resources.
3059 void hisi_qm_uninit(struct hisi_qm *qm) in hisi_qm_uninit() argument
3061 qm_cmd_uninit(qm); in hisi_qm_uninit()
3062 hisi_qm_unint_work(qm); in hisi_qm_uninit()
3064 down_write(&qm->qps_lock); in hisi_qm_uninit()
3065 hisi_qm_memory_uninit(qm); in hisi_qm_uninit()
3066 hisi_qm_set_state(qm, QM_NOT_READY); in hisi_qm_uninit()
3067 up_write(&qm->qps_lock); in hisi_qm_uninit()
3069 qm_remove_uacce(qm); in hisi_qm_uninit()
3070 qm_irqs_unregister(qm); in hisi_qm_uninit()
3071 hisi_qm_pci_uninit(qm); in hisi_qm_uninit()
3076 * hisi_qm_get_vft() - Get vft from a qm.
3077 * @qm: The qm whose vft we want to get.
3081 * We can allocate multiple queues to a qm by configuring virtual function
3085 * qm hw v1 does not support this interface.
3087 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) in hisi_qm_get_vft() argument
3092 if (!qm->ops->get_vft) { in hisi_qm_get_vft()
3093 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); in hisi_qm_get_vft()
3097 return qm->ops->get_vft(qm, base, number); in hisi_qm_get_vft()
3101 * hisi_qm_set_vft() - Set vft to a qm.
3102 * @qm: The qm whose vft we want to set.
3110 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3111 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3114 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, in hisi_qm_set_vft() argument
3117 u32 max_q_num = qm->ctrl_qp_num; in hisi_qm_set_vft()
3123 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); in hisi_qm_set_vft()
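The A~B examples in the comment above, written out with the same placeholders:

	hisi_qm_set_vft(qm, 0, A, B - A + 1);	/* queues A~B to the PF */
	hisi_qm_set_vft(qm, 2, A, B - A + 1);	/* queues A~B to VF 2 */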
3126 static void qm_init_eq_aeq_status(struct hisi_qm *qm) in qm_init_eq_aeq_status() argument
3128 struct hisi_qm_status *status = &qm->status; in qm_init_eq_aeq_status()
3136 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) in qm_enable_eq_aeq_interrupts() argument
3139 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_enable_eq_aeq_interrupts()
3140 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_enable_eq_aeq_interrupts()
3142 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3143 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3146 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) in qm_disable_eq_aeq_interrupts() argument
3148 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3149 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3152 static int qm_eq_ctx_cfg(struct hisi_qm *qm) in qm_eq_ctx_cfg() argument
3156 eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3157 eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3158 if (qm->ver == QM_HW_V1) in qm_eq_ctx_cfg()
3160 eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_eq_ctx_cfg()
3162 return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0); in qm_eq_ctx_cfg()
3165 static int qm_aeq_ctx_cfg(struct hisi_qm *qm) in qm_aeq_ctx_cfg() argument
3169 aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3170 aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3171 aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_aeq_ctx_cfg()
3173 return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0); in qm_aeq_ctx_cfg()
3176 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) in qm_eq_aeq_ctx_cfg() argument
3178 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_ctx_cfg()
3181 qm_init_eq_aeq_status(qm); in qm_eq_aeq_ctx_cfg()
3183 ret = qm_eq_ctx_cfg(qm); in qm_eq_aeq_ctx_cfg()
3189 return qm_aeq_ctx_cfg(qm); in qm_eq_aeq_ctx_cfg()
3192 static int __hisi_qm_start(struct hisi_qm *qm) in __hisi_qm_start() argument
3196 WARN_ON(!qm->qdma.va); in __hisi_qm_start()
3198 if (qm->fun_type == QM_HW_PF) { in __hisi_qm_start()
3199 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
3204 ret = qm_eq_aeq_ctx_cfg(qm); in __hisi_qm_start()
3208 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
3212 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
3216 qm_init_prefetch(qm); in __hisi_qm_start()
3217 qm_enable_eq_aeq_interrupts(qm); in __hisi_qm_start()
3223 * hisi_qm_start() - start qm
3224 * @qm: The qm to be started.
3226 * This function starts a qm, after which qps can be allocated from it.
3228 int hisi_qm_start(struct hisi_qm *qm) in hisi_qm_start() argument
3230 struct device *dev = &qm->pdev->dev; in hisi_qm_start()
3233 down_write(&qm->qps_lock); in hisi_qm_start()
3235 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); in hisi_qm_start()
3237 if (!qm->qp_num) { in hisi_qm_start()
3243 ret = __hisi_qm_start(qm); in hisi_qm_start()
3247 atomic_set(&qm->status.flags, QM_WORK); in hisi_qm_start()
3248 hisi_qm_set_state(qm, QM_READY); in hisi_qm_start()
3251 up_write(&qm->qps_lock); in hisi_qm_start()
3256 static int qm_restart(struct hisi_qm *qm) in qm_restart() argument
3258 struct device *dev = &qm->pdev->dev; in qm_restart()
3262 ret = hisi_qm_start(qm); in qm_restart()
3266 down_write(&qm->qps_lock); in qm_restart()
3267 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
3268 qp = &qm->qp_array[i]; in qm_restart()
3275 up_write(&qm->qps_lock); in qm_restart()
3281 up_write(&qm->qps_lock); in qm_restart()
3287 static void qm_stop_started_qp(struct hisi_qm *qm) in qm_stop_started_qp() argument
3292 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
3293 qp = &qm->qp_array[i]; in qm_stop_started_qp()
3302 * qm_clear_queues() - Clear all queues memory in a qm.
3303 * @qm: The qm in which the queues will be cleared.
3305 * This function clears the memory of all queues in a qm. Reset of the accelerator can
3308 static void qm_clear_queues(struct hisi_qm *qm) in qm_clear_queues() argument
3313 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
3314 qp = &qm->qp_array[i]; in qm_clear_queues()
3319 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
3323 * hisi_qm_stop() - Stop a qm.
3324 * @qm: The qm which will be stopped.
3325 * @r: The reason to stop qm.
3327 * This function stops the qm and its qps, after which the qm cannot accept requests.
3329 * to let qm start again.
3331 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) in hisi_qm_stop() argument
3333 struct device *dev = &qm->pdev->dev; in hisi_qm_stop()
3336 down_write(&qm->qps_lock); in hisi_qm_stop()
3338 if (atomic_read(&qm->status.flags) == QM_STOP) in hisi_qm_stop()
3342 atomic_set(&qm->status.flags, QM_STOP); in hisi_qm_stop()
3343 qm->status.stop_reason = r; in hisi_qm_stop()
3345 if (qm->status.stop_reason != QM_NORMAL) { in hisi_qm_stop()
3346 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); in hisi_qm_stop()
3352 if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) && in hisi_qm_stop()
3354 ret = qm_drain_qm(qm); in hisi_qm_stop()
3356 dev_err(dev, "failed to drain qm!\n"); in hisi_qm_stop()
3361 qm_stop_started_qp(qm); in hisi_qm_stop()
3363 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); in hisi_qm_stop()
3366 qm_disable_eq_aeq_interrupts(qm); in hisi_qm_stop()
3367 if (qm->fun_type == QM_HW_PF) { in hisi_qm_stop()
3368 ret = hisi_qm_set_vft(qm, 0, 0, 0); in hisi_qm_stop()
3376 qm_clear_queues(qm); in hisi_qm_stop()
3377 qm->status.stop_reason = QM_NORMAL; in hisi_qm_stop()
3380 up_write(&qm->qps_lock); in hisi_qm_stop()
3385 static void qm_hw_error_init(struct hisi_qm *qm) in qm_hw_error_init() argument
3387 if (!qm->ops->hw_error_init) { in qm_hw_error_init()
3388 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); in qm_hw_error_init()
3392 qm->ops->hw_error_init(qm); in qm_hw_error_init()
3395 static void qm_hw_error_uninit(struct hisi_qm *qm) in qm_hw_error_uninit() argument
3397 if (!qm->ops->hw_error_uninit) { in qm_hw_error_uninit()
3398 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); in qm_hw_error_uninit()
3402 qm->ops->hw_error_uninit(qm); in qm_hw_error_uninit()
3405 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) in qm_hw_error_handle() argument
3407 if (!qm->ops->hw_error_handle) { in qm_hw_error_handle()
3408 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); in qm_hw_error_handle()
3412 return qm->ops->hw_error_handle(qm); in qm_hw_error_handle()
3417 * @qm: The qm for which we want to do error initialization.
3419 * Initialize QM and device error related configuration.
3421 void hisi_qm_dev_err_init(struct hisi_qm *qm) in hisi_qm_dev_err_init() argument
3423 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_init()
3426 qm_hw_error_init(qm); in hisi_qm_dev_err_init()
3428 if (!qm->err_ini->hw_err_enable) { in hisi_qm_dev_err_init()
3429 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); in hisi_qm_dev_err_init()
3432 qm->err_ini->hw_err_enable(qm); in hisi_qm_dev_err_init()
3438 * @qm: The qm for which we want to do error uninitialization.
3440 * Uninitialize QM and device error related configuration.
3442 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) in hisi_qm_dev_err_uninit() argument
3444 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_uninit()
3447 qm_hw_error_uninit(qm); in hisi_qm_dev_err_uninit()
3449 if (!qm->err_ini->hw_err_disable) { in hisi_qm_dev_err_uninit()
3450 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); in hisi_qm_dev_err_uninit()
3453 qm->err_ini->hw_err_disable(qm); in hisi_qm_dev_err_uninit()
3488 struct hisi_qm *qm; in hisi_qm_sort_devices() local
3493 list_for_each_entry(qm, &qm_list->list, list) { in hisi_qm_sort_devices()
3494 dev = &qm->pdev->dev; in hisi_qm_sort_devices()
3504 res->qm = qm; in hisi_qm_sort_devices()
3550 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); in hisi_qm_alloc_qps_node()
3574 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) in qm_vf_q_assign() argument
3577 u32 max_qp_num = qm->max_qp_num; in qm_vf_q_assign()
3578 u32 q_base = qm->qp_num; in qm_vf_q_assign()
3584 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; in qm_vf_q_assign()
3609 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); in qm_vf_q_assign()
3612 hisi_qm_set_vft(qm, j, 0, 0); in qm_vf_q_assign()
3621 static int qm_clear_vft_config(struct hisi_qm *qm) in qm_clear_vft_config() argument
3626 for (i = 1; i <= qm->vfs_num; i++) { in qm_clear_vft_config()
3627 ret = hisi_qm_set_vft(qm, i, 0, 0); in qm_clear_vft_config()
3631 qm->vfs_num = 0; in qm_clear_vft_config()
3636 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) in qm_func_shaper_enable() argument
3638 struct device *dev = &qm->pdev->dev; in qm_func_shaper_enable()
3642 total_vfs = pci_sriov_get_totalvfs(qm->pdev); in qm_func_shaper_enable()
3646 qm->factor[fun_index].func_qos = qos; in qm_func_shaper_enable()
3648 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); in qm_func_shaper_enable()
3656 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); in qm_func_shaper_enable()
3666 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) in qm_get_shaper_vft_qos() argument
3674 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3680 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); in qm_get_shaper_vft_qos()
3681 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); in qm_get_shaper_vft_qos()
3682 writel(fun_index, qm->io_base + QM_VFT_CFG); in qm_get_shaper_vft_qos()
3684 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_get_shaper_vft_qos()
3685 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_get_shaper_vft_qos()
3687 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3693 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | in qm_get_shaper_vft_qos()
3694 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); in qm_get_shaper_vft_qos()
3705 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; in qm_get_shaper_vft_qos()
3709 pci_err(qm->pdev, "error_rate: %u, failed to get function qos!\n", error_rate); in qm_get_shaper_vft_qos()
3716 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) in qm_vf_get_qos() argument
3718 struct device *dev = &qm->pdev->dev; in qm_vf_get_qos()
3722 qos = qm_get_shaper_vft_qos(qm, fun_num); in qm_vf_get_qos()
3728 ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num); in qm_vf_get_qos()
3733 static int qm_vf_read_qos(struct hisi_qm *qm) in qm_vf_read_qos() argument
3739 qm->mb_qos = 0; in qm_vf_read_qos()
3742 ret = qm_ping_pf(qm, QM_VF_GET_QOS); in qm_vf_read_qos()
3744 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); in qm_vf_read_qos()
3750 if (qm->mb_qos) in qm_vf_read_qos()
3754 pci_err(qm->pdev, "PF ping VF timeout!\n"); in qm_vf_read_qos()
3765 struct hisi_qm *qm = filp->private_data; in qm_algqos_read() local
3770 ret = hisi_qm_get_dfx_access(qm); in qm_algqos_read()
3775 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_read()
3776 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); in qm_algqos_read()
3781 if (qm->fun_type == QM_HW_PF) { in qm_algqos_read()
3782 ir = qm_get_shaper_vft_qos(qm, 0); in qm_algqos_read()
3784 ret = qm_vf_read_qos(qm); in qm_algqos_read()
3787 ir = qm->mb_qos; in qm_algqos_read()
3796 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_read()
3798 hisi_qm_put_dfx_access(qm); in qm_algqos_read()
3802 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, in qm_get_qos_value() argument
3806 const struct bus_type *bus_type = qm->pdev->dev.bus; in qm_get_qos_value()
3819 pci_err(qm->pdev, "invalid qos value, please set a value in 1~1000!\n"); in qm_get_qos_value()
3825 pci_err(qm->pdev, "invalid pci bdf number!\n"); in qm_get_qos_value()
3839 struct hisi_qm *qm = filp->private_data; in qm_algqos_write() local
3856 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); in qm_algqos_write()
3861 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_write()
3862 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); in qm_algqos_write()
3866 ret = qm_pm_get_sync(qm); in qm_algqos_write()
3872 ret = qm_func_shaper_enable(qm, fun_index, val); in qm_algqos_write()
3874 pci_err(qm->pdev, "failed to enable function shaper!\n"); in qm_algqos_write()
3879 pci_info(qm->pdev, "the qos value of function %u is set to %lu.\n", in qm_algqos_write()
3884 qm_pm_put_sync(qm); in qm_algqos_write()
3886 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_write()
3899 * @qm: The qm for which we want to add debugfs files.
3903 void hisi_qm_set_algqos_init(struct hisi_qm *qm) in hisi_qm_set_algqos_init() argument
3905 if (qm->fun_type == QM_HW_PF) in hisi_qm_set_algqos_init()
3906 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3907 qm, &qm_algqos_fops); in hisi_qm_set_algqos_init()
3908 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_set_algqos_init()
3909 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3910 qm, &qm_algqos_fops); in hisi_qm_set_algqos_init()
3913 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) in hisi_qm_init_vf_qos() argument
3918 qm->factor[i].func_qos = QM_QOS_MAX_VAL; in hisi_qm_init_vf_qos()
3932 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_sriov_enable() local
3935 ret = qm_pm_get_sync(qm); in hisi_qm_sriov_enable()
3955 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_sriov_enable()
3956 hisi_qm_init_vf_qos(qm, num_vfs); in hisi_qm_sriov_enable()
3958 ret = qm_vf_q_assign(qm, num_vfs); in hisi_qm_sriov_enable()
3967 qm_clear_vft_config(qm); in hisi_qm_sriov_enable()
3970 qm->vfs_num = num_vfs; in hisi_qm_sriov_enable()
3977 qm_pm_put_sync(qm); in hisi_qm_sriov_enable()
3991 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_sriov_disable() local
3999 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { in hisi_qm_sriov_disable()
4006 qm->vfs_num = 0; in hisi_qm_sriov_disable()
4007 qm_pm_put_sync(qm); in hisi_qm_sriov_disable()
4009 return qm_clear_vft_config(qm); in hisi_qm_sriov_disable()
4029 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) in qm_dev_err_handle() argument
4031 if (!qm->err_ini->get_err_result) { in qm_dev_err_handle()
4032 dev_err(&qm->pdev->dev, "Device doesn't support reset!\n"); in qm_dev_err_handle()
4036 return qm->err_ini->get_err_result(qm); in qm_dev_err_handle()
4039 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) in qm_process_dev_error() argument
4043 /* log qm error */ in qm_process_dev_error()
4044 qm_ret = qm_hw_error_handle(qm); in qm_process_dev_error()
4047 dev_ret = qm_dev_err_handle(qm); in qm_process_dev_error()
4055 * hisi_qm_dev_err_detected() - Get device and qm error status, then log it.
4060 * qm hardware error status when an error occurs.
4065 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_err_detected() local
4075 ret = qm_process_dev_error(qm); in hisi_qm_dev_err_detected()
4083 static int qm_check_req_recv(struct hisi_qm *qm) in qm_check_req_recv() argument
4085 struct pci_dev *pdev = qm->pdev; in qm_check_req_recv()
4089 if (qm->ver >= QM_HW_V3) in qm_check_req_recv()
4092 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4093 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4097 dev_err(&pdev->dev, "Failed to read QM reg!\n"); in qm_check_req_recv()
4101 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4102 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4106 dev_err(&pdev->dev, "Failed to read QM reg the second time!\n"); in qm_check_req_recv()
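qm_check_req_recv() verifies that all prior posted writes have reached the QM by writing a scratch value to the PEH vendor-ID register and polling it back, then repeating with the real vendor ID. A hedged sketch of one round of that handshake; the poll interval and timeout are illustrative assumptions:

static int example_flush_check(void __iomem *io_base, u32 probe_val)
{
	u32 val;

	writel(probe_val, io_base + QM_PEH_VENDOR_ID);

	/* 10us poll interval, 1ms timeout: example values only */
	return readl_relaxed_poll_timeout(io_base + QM_PEH_VENDOR_ID, val,
					  val == probe_val, 10, 1000);
}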
4111 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) in qm_set_pf_mse() argument
4113 struct pci_dev *pdev = qm->pdev; in qm_set_pf_mse()
4135 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) in qm_set_vf_mse() argument
4137 struct pci_dev *pdev = qm->pdev; in qm_set_vf_mse()
4167 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) in qm_dev_ecc_mbit_handle() argument
4172 if (qm->ver >= QM_HW_V3) in qm_dev_ecc_mbit_handle()
4175 if (!qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4176 qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4177 qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4178 qm->err_ini->close_axi_master_ooo(qm); in qm_dev_ecc_mbit_handle()
4179 } else if (qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4180 !qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4181 !qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4182 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4184 qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4185 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); in qm_dev_ecc_mbit_handle()
4189 static int qm_vf_reset_prepare(struct hisi_qm *qm, in qm_vf_reset_prepare() argument
4192 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_prepare()
4193 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_prepare()
4219 static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd, in qm_try_stop_vfs() argument
4222 struct pci_dev *pdev = qm->pdev; in qm_try_stop_vfs()
4225 if (!qm->vfs_num) in qm_try_stop_vfs()
4229 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_stop_vfs()
4230 ret = qm_ping_all_vfs(qm, cmd); in qm_try_stop_vfs()
4234 ret = qm_vf_reset_prepare(qm, stop_reason); in qm_try_stop_vfs()
4242 static int qm_controller_reset_prepare(struct hisi_qm *qm) in qm_controller_reset_prepare() argument
4244 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_prepare()
4247 if (qm->err_ini->set_priv_status) { in qm_controller_reset_prepare()
4248 ret = qm->err_ini->set_priv_status(qm); in qm_controller_reset_prepare()
4253 ret = qm_reset_prepare_ready(qm); in qm_controller_reset_prepare()
4259 qm_dev_ecc_mbit_handle(qm); in qm_controller_reset_prepare()
4262 qm_cmd_uninit(qm); in qm_controller_reset_prepare()
4265 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); in qm_controller_reset_prepare()
4269 ret = hisi_qm_stop(qm, QM_SOFT_RESET); in qm_controller_reset_prepare()
4271 pci_err(pdev, "Failed to stop QM!\n"); in qm_controller_reset_prepare()
4272 qm_reset_bit_clear(qm); in qm_controller_reset_prepare()
4276 if (qm->use_sva) { in qm_controller_reset_prepare()
4277 ret = qm_hw_err_isolate(qm); in qm_controller_reset_prepare()
4282 ret = qm_wait_vf_prepare_finish(qm); in qm_controller_reset_prepare()
4286 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset_prepare()
4291 static int qm_master_ooo_check(struct hisi_qm *qm) in qm_master_ooo_check() argument
4297 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_master_ooo_check()
4298 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_master_ooo_check()
4302 pci_warn(qm->pdev, "Bus lock! Please reset system.\n"); in qm_master_ooo_check()
4307 static int qm_soft_reset_prepare(struct hisi_qm *qm) in qm_soft_reset_prepare() argument
4309 struct pci_dev *pdev = qm->pdev; in qm_soft_reset_prepare()
4312 /* Ensure all doorbells and mailboxes have been received by QM */ in qm_soft_reset_prepare()
4313 ret = qm_check_req_recv(qm); in qm_soft_reset_prepare()
4317 if (qm->vfs_num) { in qm_soft_reset_prepare()
4318 ret = qm_set_vf_mse(qm, false); in qm_soft_reset_prepare()
4325 ret = qm->ops->set_msi(qm, false); in qm_soft_reset_prepare()
4331 ret = qm_master_ooo_check(qm); in qm_soft_reset_prepare()
4335 if (qm->err_ini->close_sva_prefetch) in qm_soft_reset_prepare()
4336 qm->err_ini->close_sva_prefetch(qm); in qm_soft_reset_prepare()
4338 ret = qm_set_pf_mse(qm, false); in qm_soft_reset_prepare()
4345 static int qm_reset_device(struct hisi_qm *qm) in qm_reset_device() argument
4347 struct pci_dev *pdev = qm->pdev; in qm_reset_device()
4355 qm->err_info.acpi_rst, in qm_reset_device()
4374 static int qm_soft_reset(struct hisi_qm *qm) in qm_soft_reset() argument
4378 ret = qm_soft_reset_prepare(qm); in qm_soft_reset()
4382 return qm_reset_device(qm); in qm_soft_reset()
4385 static int qm_vf_reset_done(struct hisi_qm *qm) in qm_vf_reset_done() argument
4387 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_done()
4388 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_done()
4414 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd) in qm_try_start_vfs() argument
4416 struct pci_dev *pdev = qm->pdev; in qm_try_start_vfs()
4419 if (!qm->vfs_num) in qm_try_start_vfs()
4422 ret = qm_vf_q_assign(qm, qm->vfs_num); in qm_try_start_vfs()
4429 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_start_vfs()
4430 ret = qm_ping_all_vfs(qm, cmd); in qm_try_start_vfs()
4434 ret = qm_vf_reset_done(qm); in qm_try_start_vfs()
4442 static int qm_dev_hw_init(struct hisi_qm *qm) in qm_dev_hw_init() argument
4444 return qm->err_ini->hw_init(qm); in qm_dev_hw_init()
4447 static void qm_restart_prepare(struct hisi_qm *qm) in qm_restart_prepare() argument
4451 if (qm->err_ini->open_sva_prefetch) in qm_restart_prepare()
4452 qm->err_ini->open_sva_prefetch(qm); in qm_restart_prepare()
4454 if (qm->ver >= QM_HW_V3) in qm_restart_prepare()
4457 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_prepare()
4458 !qm->err_status.is_dev_ecc_mbit) in qm_restart_prepare()
4462 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4463 writel(value & ~qm->err_info.msi_wr_port, in qm_restart_prepare()
4464 qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4467 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; in qm_restart_prepare()
4468 if (value && qm->err_ini->clear_dev_hw_err_status) in qm_restart_prepare()
4469 qm->err_ini->clear_dev_hw_err_status(qm, value); in qm_restart_prepare()
4471 /* clear QM ecc mbit error source */ in qm_restart_prepare()
4472 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_restart_prepare()
4475 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); in qm_restart_prepare()
4478 static void qm_restart_done(struct hisi_qm *qm) in qm_restart_done() argument
4482 if (qm->ver >= QM_HW_V3) in qm_restart_done()
4485 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_done()
4486 !qm->err_status.is_dev_ecc_mbit) in qm_restart_done()
4490 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4491 value |= qm->err_info.msi_wr_port; in qm_restart_done()
4492 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4495 qm->err_status.is_qm_ecc_mbit = false; in qm_restart_done()
4496 qm->err_status.is_dev_ecc_mbit = false; in qm_restart_done()
4499 static int qm_controller_reset_done(struct hisi_qm *qm) in qm_controller_reset_done() argument
4501 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_done()
4504 ret = qm->ops->set_msi(qm, true); in qm_controller_reset_done()
4510 ret = qm_set_pf_mse(qm, true); in qm_controller_reset_done()
4516 if (qm->vfs_num) { in qm_controller_reset_done()
4517 ret = qm_set_vf_mse(qm, true); in qm_controller_reset_done()
4524 ret = qm_dev_hw_init(qm); in qm_controller_reset_done()
4530 qm_restart_prepare(qm); in qm_controller_reset_done()
4531 hisi_qm_dev_err_init(qm); in qm_controller_reset_done()
4532 if (qm->err_ini->open_axi_master_ooo) in qm_controller_reset_done()
4533 qm->err_ini->open_axi_master_ooo(qm); in qm_controller_reset_done()
4535 ret = qm_dev_mem_reset(qm); in qm_controller_reset_done()
4541 ret = qm_restart(qm); in qm_controller_reset_done()
4543 pci_err(pdev, "Failed to start QM!\n"); in qm_controller_reset_done()
4547 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); in qm_controller_reset_done()
4551 ret = qm_wait_vf_prepare_finish(qm); in qm_controller_reset_done()
4555 qm_cmd_init(qm); in qm_controller_reset_done()
4556 qm_restart_done(qm); in qm_controller_reset_done()
4558 qm_reset_bit_clear(qm); in qm_controller_reset_done()
4563 static int qm_controller_reset(struct hisi_qm *qm) in qm_controller_reset() argument
4565 struct pci_dev *pdev = qm->pdev; in qm_controller_reset()
4570 ret = qm_controller_reset_prepare(qm); in qm_controller_reset()
4572 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); in qm_controller_reset()
4573 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); in qm_controller_reset()
4574 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset()
4578 hisi_qm_show_last_dfx_regs(qm); in qm_controller_reset()
4579 if (qm->err_ini->show_last_dfx_regs) in qm_controller_reset()
4580 qm->err_ini->show_last_dfx_regs(qm); in qm_controller_reset()
4582 ret = qm_soft_reset(qm); in qm_controller_reset()
4586 ret = qm_controller_reset_done(qm); in qm_controller_reset()
4596 qm_reset_bit_clear(qm); in qm_controller_reset()
4599 if (qm->use_sva) in qm_controller_reset()
4600 qm->isolate_data.is_isolate = true; in qm_controller_reset()
4608 * This function offers the QM-related PCIe device reset interface. Drivers which
4609 * use QM can use this function as slot_reset in their struct pci_error_handlers.
4613 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_slot_reset() local
4620 ret = qm_controller_reset(qm); in hisi_qm_dev_slot_reset()
4633 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_reset_prepare() local
4643 while (qm_check_dev_error(qm)) { in hisi_qm_reset_prepare()
4649 ret = qm_reset_prepare_ready(qm); in hisi_qm_reset_prepare()
4656 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_prepare()
4657 qm_cmd_uninit(qm); in hisi_qm_reset_prepare()
4659 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN); in hisi_qm_reset_prepare()
4663 ret = hisi_qm_stop(qm, QM_DOWN); in hisi_qm_reset_prepare()
4665 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); in hisi_qm_reset_prepare()
4666 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); in hisi_qm_reset_prepare()
4667 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); in hisi_qm_reset_prepare()
4671 ret = qm_wait_vf_prepare_finish(qm); in hisi_qm_reset_prepare()
4682 struct hisi_qm *qm = pci_get_drvdata(pf_pdev); in qm_flr_reset_complete() local
4685 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); in qm_flr_reset_complete()
4697 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_reset_done() local
4700 if (qm->fun_type == QM_HW_PF) { in hisi_qm_reset_done()
4701 ret = qm_dev_hw_init(qm); in hisi_qm_reset_done()
4710 ret = qm_restart(qm); in hisi_qm_reset_done()
4712 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); in hisi_qm_reset_done()
4716 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); in hisi_qm_reset_done()
4720 ret = qm_wait_vf_prepare_finish(qm); in hisi_qm_reset_done()
4725 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_done()
4726 qm_cmd_init(qm); in hisi_qm_reset_done()
4731 qm_reset_bit_clear(qm); in hisi_qm_reset_done()
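Taken together, hisi_qm_dev_err_detected(), hisi_qm_dev_slot_reset(), hisi_qm_reset_prepare() and hisi_qm_reset_done() form a complete struct pci_error_handlers. A wiring sketch of the kind an accelerator driver built on the QM would use (struct name illustrative):

static const struct pci_error_handlers example_acc_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};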
4737 struct hisi_qm *qm = data; in qm_abnormal_irq() local
4740 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); in qm_abnormal_irq()
4741 ret = qm_process_dev_error(qm); in qm_abnormal_irq()
4743 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && in qm_abnormal_irq()
4744 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) in qm_abnormal_irq()
4745 schedule_work(&qm->rst_work); in qm_abnormal_irq()
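The handler classifies the error in hard-IRQ context and defers the heavyweight controller reset to the rst_work item. A minimal sketch of that top-half pattern, assuming ACC_ERR_NEED_RESET is the result value that demands a reset:

static irqreturn_t example_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);

	if (qm_process_dev_error(qm) == ACC_ERR_NEED_RESET &&
	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}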
4754 * This function stops the qm when the OS shuts down or reboots.
4758 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_shutdown() local
4761 ret = hisi_qm_stop(qm, QM_DOWN); in hisi_qm_dev_shutdown()
4763 dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n"); in hisi_qm_dev_shutdown()
4765 hisi_qm_cache_wb(qm); in hisi_qm_dev_shutdown()
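Drivers install this as the PCI .shutdown hook so queues are quiesced and the cache is written back before reboot. A hedged wiring sketch; every example_* symbol is assumed to exist elsewhere in the driver:

static struct pci_driver example_acc_driver = {
	.name		= "example_acc",
	.id_table	= example_acc_dev_ids,
	.probe		= example_acc_probe,
	.remove		= example_acc_remove,
	.shutdown	= hisi_qm_dev_shutdown,
};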
4771 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); in hisi_qm_controller_reset() local
4774 ret = qm_pm_get_sync(qm); in hisi_qm_controller_reset()
4776 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in hisi_qm_controller_reset()
4781 ret = qm_controller_reset(qm); in hisi_qm_controller_reset()
4783 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); in hisi_qm_controller_reset()
4785 qm_pm_put_sync(qm); in hisi_qm_controller_reset()
4788 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, in qm_pf_reset_vf_prepare() argument
4792 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_prepare()
4795 ret = qm_reset_prepare_ready(qm); in qm_pf_reset_vf_prepare()
4798 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4803 ret = hisi_qm_stop(qm, stop_reason); in qm_pf_reset_vf_prepare()
4805 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); in qm_pf_reset_vf_prepare()
4806 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4814 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); in qm_pf_reset_vf_prepare()
4815 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); in qm_pf_reset_vf_prepare()
4818 ret = qm_ping_pf(qm, cmd); in qm_pf_reset_vf_prepare()
4823 static void qm_pf_reset_vf_done(struct hisi_qm *qm) in qm_pf_reset_vf_done() argument
4826 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_done()
4830 ret = hisi_qm_start(qm); in qm_pf_reset_vf_done()
4832 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); in qm_pf_reset_vf_done()
4836 qm_cmd_init(qm); in qm_pf_reset_vf_done()
4837 ret = qm_ping_pf(qm, cmd); in qm_pf_reset_vf_done()
4841 qm_reset_bit_clear(qm); in qm_pf_reset_vf_done()
4844 static int qm_wait_pf_reset_finish(struct hisi_qm *qm) in qm_wait_pf_reset_finish() argument
4846 struct device *dev = &qm->pdev->dev; in qm_wait_pf_reset_finish()
4851 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, in qm_wait_pf_reset_finish()
4864 ret = qm->ops->get_ifc(qm, &cmd, NULL, 0); in qm_wait_pf_reset_finish()
4865 qm_clear_cmd_interrupt(qm, 0); in qm_wait_pf_reset_finish()
4879 static void qm_pf_reset_vf_process(struct hisi_qm *qm, in qm_pf_reset_vf_process() argument
4882 struct device *dev = &qm->pdev->dev; in qm_pf_reset_vf_process()
4888 qm_cmd_uninit(qm); in qm_pf_reset_vf_process()
4889 qm_pf_reset_vf_prepare(qm, stop_reason); in qm_pf_reset_vf_process()
4891 ret = qm_wait_pf_reset_finish(qm); in qm_pf_reset_vf_process()
4895 qm_pf_reset_vf_done(qm); in qm_pf_reset_vf_process()
4902 qm_cmd_init(qm); in qm_pf_reset_vf_process()
4903 qm_reset_bit_clear(qm); in qm_pf_reset_vf_process()
4906 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) in qm_handle_cmd_msg() argument
4908 struct device *dev = &qm->pdev->dev; in qm_handle_cmd_msg()
4917 ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num); in qm_handle_cmd_msg()
4918 qm_clear_cmd_interrupt(qm, BIT(fun_num)); in qm_handle_cmd_msg()
4926 qm_pf_reset_vf_process(qm, QM_DOWN); in qm_handle_cmd_msg()
4929 qm_pf_reset_vf_process(qm, QM_SOFT_RESET); in qm_handle_cmd_msg()
4932 qm_vf_get_qos(qm, fun_num); in qm_handle_cmd_msg()
4935 qm->mb_qos = data; in qm_handle_cmd_msg()
4945 struct hisi_qm *qm = container_of(cmd_process, in qm_cmd_process() local
4947 u32 vfs_num = qm->vfs_num; in qm_cmd_process()
4951 if (qm->fun_type == QM_HW_PF) { in qm_cmd_process()
4952 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_cmd_process()
4958 qm_handle_cmd_msg(qm, i); in qm_cmd_process()
4964 qm_handle_cmd_msg(qm, 0); in qm_cmd_process()
4969 * @qm: The qm to be added.
4970 * @qm_list: The qm list.
4975 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) in hisi_qm_alg_register() argument
4977 struct device *dev = &qm->pdev->dev; in hisi_qm_alg_register()
4979 if (qm->ver <= QM_HW_V2 && qm->use_sva) { in hisi_qm_alg_register()
4984 if (qm->qp_num < guard) { in hisi_qm_alg_register()
4989 return qm_list->register_to_crypto(qm); in hisi_qm_alg_register()
4995 * @qm: The qm to be deleted.
4996 * @qm_list: The qm list.
5001 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) in hisi_qm_alg_unregister() argument
5003 if (qm->ver <= QM_HW_V2 && qm->use_sva) in hisi_qm_alg_unregister()
5006 if (qm->qp_num < guard) in hisi_qm_alg_unregister()
5009 qm_list->unregister_from_crypto(qm); in hisi_qm_alg_unregister()
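A usage sketch, under the assumption of a driver-owned hisi_qm_list ("example_devices") whose register_to_crypto/unregister_from_crypto callbacks are filled in elsewhere; the guard of 2 queue pairs is illustrative:

static int example_probe_algs(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_alg_register(qm, &example_devices, 2);
	if (ret)
		return ret;

	/* ... device now serves crypto requests ... */

	return 0;
}

static void example_remove_algs(struct hisi_qm *qm)
{
	hisi_qm_alg_unregister(qm, &example_devices, 2);
}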
5013 static void qm_unregister_abnormal_irq(struct hisi_qm *qm) in qm_unregister_abnormal_irq() argument
5015 struct pci_dev *pdev = qm->pdev; in qm_unregister_abnormal_irq()
5018 if (qm->fun_type == QM_HW_VF) in qm_unregister_abnormal_irq()
5021 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; in qm_unregister_abnormal_irq()
5026 free_irq(pci_irq_vector(pdev, irq_vector), qm); in qm_unregister_abnormal_irq()
5029 static int qm_register_abnormal_irq(struct hisi_qm *qm) in qm_register_abnormal_irq() argument
5031 struct pci_dev *pdev = qm->pdev; in qm_register_abnormal_irq()
5035 if (qm->fun_type == QM_HW_VF) in qm_register_abnormal_irq()
5038 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; in qm_register_abnormal_irq()
5043 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); in qm_register_abnormal_irq()
5045 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); in qm_register_abnormal_irq()
5050 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) in qm_unregister_mb_cmd_irq() argument
5052 struct pci_dev *pdev = qm->pdev; in qm_unregister_mb_cmd_irq()
5055 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; in qm_unregister_mb_cmd_irq()
5060 free_irq(pci_irq_vector(pdev, irq_vector), qm); in qm_unregister_mb_cmd_irq()
5063 static int qm_register_mb_cmd_irq(struct hisi_qm *qm) in qm_register_mb_cmd_irq() argument
5065 struct pci_dev *pdev = qm->pdev; in qm_register_mb_cmd_irq()
5069 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; in qm_register_mb_cmd_irq()
5074 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); in qm_register_mb_cmd_irq()
5081 static void qm_unregister_aeq_irq(struct hisi_qm *qm) in qm_unregister_aeq_irq() argument
5083 struct pci_dev *pdev = qm->pdev; in qm_unregister_aeq_irq()
5086 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; in qm_unregister_aeq_irq()
5091 free_irq(pci_irq_vector(pdev, irq_vector), qm); in qm_unregister_aeq_irq()
5094 static int qm_register_aeq_irq(struct hisi_qm *qm) in qm_register_aeq_irq() argument
5096 struct pci_dev *pdev = qm->pdev; in qm_register_aeq_irq()
5100 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; in qm_register_aeq_irq()
5106 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); in qm_register_aeq_irq()
5113 static void qm_unregister_eq_irq(struct hisi_qm *qm) in qm_unregister_eq_irq() argument
5115 struct pci_dev *pdev = qm->pdev; in qm_unregister_eq_irq()
5118 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; in qm_unregister_eq_irq()
5123 free_irq(pci_irq_vector(pdev, irq_vector), qm); in qm_unregister_eq_irq()
5126 static int qm_register_eq_irq(struct hisi_qm *qm) in qm_register_eq_irq() argument
5128 struct pci_dev *pdev = qm->pdev; in qm_register_eq_irq()
5132 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; in qm_register_eq_irq()
5137 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); in qm_register_eq_irq()
5144 static void qm_irqs_unregister(struct hisi_qm *qm) in qm_irqs_unregister() argument
5146 qm_unregister_mb_cmd_irq(qm); in qm_irqs_unregister()
5147 qm_unregister_abnormal_irq(qm); in qm_irqs_unregister()
5148 qm_unregister_aeq_irq(qm); in qm_irqs_unregister()
5149 qm_unregister_eq_irq(qm); in qm_irqs_unregister()
5152 static int qm_irqs_register(struct hisi_qm *qm) in qm_irqs_register() argument
5156 ret = qm_register_eq_irq(qm); in qm_irqs_register()
5160 ret = qm_register_aeq_irq(qm); in qm_irqs_register()
5164 ret = qm_register_abnormal_irq(qm); in qm_irqs_register()
5168 ret = qm_register_mb_cmd_irq(qm); in qm_irqs_register()
5175 qm_unregister_abnormal_irq(qm); in qm_irqs_register()
5177 qm_unregister_aeq_irq(qm); in qm_irqs_register()
5179 qm_unregister_eq_irq(qm); in qm_irqs_register()
5183 static int qm_get_qp_num(struct hisi_qm *qm) in qm_get_qp_num() argument
5185 struct device *dev = &qm->pdev->dev; in qm_get_qp_num()
5189 if (qm->fun_type == QM_HW_VF) { in qm_get_qp_num()
5190 if (qm->ver != QM_HW_V1) in qm_get_qp_num()
5192 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); in qm_get_qp_num()
5197 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_qp_num()
5198 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); in qm_get_qp_num()
5199 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, in qm_get_qp_num()
5202 if (qm->qp_num <= qm->max_qp_num) in qm_get_qp_num()
5205 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { in qm_get_qp_num()
5208 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5213 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5214 qm->qp_num = qm->max_qp_num; in qm_get_qp_num()
5215 qm->debug.curr_qm_qp_num = qm->qp_num; in qm_get_qp_num()
5220 static int qm_pre_store_caps(struct hisi_qm *qm) in qm_pre_store_caps() argument
5223 struct pci_dev *pdev = qm->pdev; in qm_pre_store_caps()
5234 qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info, in qm_pre_store_caps()
5235 i, qm->cap_ver); in qm_pre_store_caps()
5238 qm->cap_tables.qm_cap_table = qm_cap; in qm_pre_store_caps()
5239 qm->cap_tables.qm_cap_size = size; in qm_pre_store_caps()
5244 static int qm_get_hw_caps(struct hisi_qm *qm) in qm_get_hw_caps() argument
5246 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? in qm_get_hw_caps()
5248 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : in qm_get_hw_caps()
5253 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); in qm_get_hw_caps()
5255 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_hw_caps()
5257 if (qm->ver >= QM_HW_V3) { in qm_get_hw_caps()
5258 val = readl(qm->io_base + QM_FUNC_CAPS_REG); in qm_get_hw_caps()
5259 qm->cap_ver = val & QM_CAPBILITY_VERSION; in qm_get_hw_caps()
5264 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); in qm_get_hw_caps()
5266 set_bit(qm_cap_info_comm[i].type, &qm->caps); in qm_get_hw_caps()
5271 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); in qm_get_hw_caps()
5273 set_bit(cap_info[i].type, &qm->caps); in qm_get_hw_caps()
5276 /* Fetch and save the value of qm capability registers */ in qm_get_hw_caps()
5277 return qm_pre_store_caps(qm); in qm_get_hw_caps()
5280 static void qm_get_version(struct hisi_qm *qm) in qm_get_version() argument
5282 struct pci_dev *pdev = qm->pdev; in qm_get_version()
5285 qm->ver = pdev->revision; in qm_get_version()
5288 sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID); in qm_get_version()
5290 qm->ver = sub_version_id; in qm_get_version()
5294 static int qm_get_pci_res(struct hisi_qm *qm) in qm_get_pci_res() argument
5296 struct pci_dev *pdev = qm->pdev; in qm_get_pci_res()
5300 ret = pci_request_mem_regions(pdev, qm->dev_name); in qm_get_pci_res()
5306 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); in qm_get_pci_res()
5307 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); in qm_get_pci_res()
5308 if (!qm->io_base) { in qm_get_pci_res()
5313 qm_get_version(qm); in qm_get_pci_res()
5315 ret = qm_get_hw_caps(qm); in qm_get_pci_res()
5319 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in qm_get_pci_res()
5320 qm->db_interval = QM_QP_DB_INTERVAL; in qm_get_pci_res()
5321 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); in qm_get_pci_res()
5322 qm->db_io_base = ioremap(qm->db_phys_base, in qm_get_pci_res()
5324 if (!qm->db_io_base) { in qm_get_pci_res()
5329 qm->db_phys_base = qm->phys_base; in qm_get_pci_res()
5330 qm->db_io_base = qm->io_base; in qm_get_pci_res()
5331 qm->db_interval = 0; in qm_get_pci_res()
5334 hisi_qm_pre_init(qm); in qm_get_pci_res()
5335 ret = qm_get_qp_num(qm); in qm_get_pci_res()
5342 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_get_pci_res()
5343 iounmap(qm->db_io_base); in qm_get_pci_res()
5345 iounmap(qm->io_base); in qm_get_pci_res()
5351 static int qm_clear_device(struct hisi_qm *qm) in qm_clear_device() argument
5353 acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); in qm_clear_device()
5356 if (qm->fun_type == QM_HW_VF) in qm_clear_device()
5360 if (!qm->err_ini->err_info_init) in qm_clear_device()
5362 qm->err_ini->err_info_init(qm); in qm_clear_device()
5368 if (!acpi_has_method(handle, qm->err_info.acpi_rst)) in qm_clear_device()
5371 ret = qm_master_ooo_check(qm); in qm_clear_device()
5373 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_clear_device()
5377 if (qm->err_ini->set_priv_status) { in qm_clear_device()
5378 ret = qm->err_ini->set_priv_status(qm); in qm_clear_device()
5380 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_clear_device()
5385 return qm_reset_device(qm); in qm_clear_device()
5388 static int hisi_qm_pci_init(struct hisi_qm *qm) in hisi_qm_pci_init() argument
5390 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_init()
5401 ret = qm_get_pci_res(qm); in hisi_qm_pci_init()
5410 num_vec = qm_get_irq_num(qm); in hisi_qm_pci_init()
5417 ret = qm_clear_device(qm); in hisi_qm_pci_init()
5426 qm_put_pci_res(qm); in hisi_qm_pci_init()
5432 static int hisi_qm_init_work(struct hisi_qm *qm) in hisi_qm_init_work() argument
5436 for (i = 0; i < qm->qp_num; i++) in hisi_qm_init_work()
5437 INIT_WORK(&qm->poll_data[i].work, qm_work_process); in hisi_qm_init_work()
5439 if (qm->fun_type == QM_HW_PF) in hisi_qm_init_work()
5440 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); in hisi_qm_init_work()
5442 if (qm->ver > QM_HW_V2) in hisi_qm_init_work()
5443 INIT_WORK(&qm->cmd_process, qm_cmd_process); in hisi_qm_init_work()
5445 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | in hisi_qm_init_work()
5447 pci_name(qm->pdev)); in hisi_qm_init_work()
5448 if (!qm->wq) { in hisi_qm_init_work()
5449 pci_err(qm->pdev, "failed to alloc workqueue!\n"); in hisi_qm_init_work()
5456 static int hisi_qp_alloc_memory(struct hisi_qm *qm) in hisi_qp_alloc_memory() argument
5458 struct device *dev = &qm->pdev->dev; in hisi_qp_alloc_memory()
5463 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); in hisi_qp_alloc_memory()
5464 if (!qm->qp_array) in hisi_qp_alloc_memory()
5467 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); in hisi_qp_alloc_memory()
5468 if (!qm->poll_data) { in hisi_qp_alloc_memory()
5469 kfree(qm->qp_array); in hisi_qp_alloc_memory()
5473 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); in hisi_qp_alloc_memory()
5476 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; in hisi_qp_alloc_memory()
5478 for (i = 0; i < qm->qp_num; i++) { in hisi_qp_alloc_memory()
5479 qm->poll_data[i].qm = qm; in hisi_qp_alloc_memory()
5480 ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); in hisi_qp_alloc_memory()
5489 hisi_qp_memory_uninit(qm, i); in hisi_qp_alloc_memory()
5494 static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm) in hisi_qm_alloc_rsv_buf() argument
5496 struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; in hisi_qm_alloc_rsv_buf()
5498 struct device *dev = &qm->pdev->dev; in hisi_qm_alloc_rsv_buf()
5524 static int hisi_qm_memory_init(struct hisi_qm *qm) in hisi_qm_memory_init() argument
5526 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_init()
5530 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in hisi_qm_memory_init()
5531 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; in hisi_qm_memory_init()
5532 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); in hisi_qm_memory_init()
5533 if (!qm->factor) in hisi_qm_memory_init()
5537 qm->factor[0].func_qos = QM_QOS_MAX_VAL; in hisi_qm_memory_init()
5540 #define QM_INIT_BUF(qm, type, num) do { \ in hisi_qm_memory_init() argument
5541 (qm)->type = ((qm)->qdma.va + (off)); \ in hisi_qm_memory_init()
5542 (qm)->type##_dma = (qm)->qdma.dma + (off); \ in hisi_qm_memory_init()
5546 idr_init(&qm->qp_idr); in hisi_qm_memory_init()
5547 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); in hisi_qm_memory_init()
5548 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + in hisi_qm_memory_init()
5549 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + in hisi_qm_memory_init()
5550 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + in hisi_qm_memory_init()
5551 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); in hisi_qm_memory_init()
5552 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, in hisi_qm_memory_init()
5554 dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size); in hisi_qm_memory_init()
5555 if (!qm->qdma.va) { in hisi_qm_memory_init()
5560 QM_INIT_BUF(qm, eqe, qm->eq_depth); in hisi_qm_memory_init()
5561 QM_INIT_BUF(qm, aeqe, qm->aeq_depth); in hisi_qm_memory_init()
5562 QM_INIT_BUF(qm, sqc, qm->qp_num); in hisi_qm_memory_init()
5563 QM_INIT_BUF(qm, cqc, qm->qp_num); in hisi_qm_memory_init()
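The QM_INIT_BUF() macro slices one coherent DMA allocation into the four queue structures by advancing a running offset. Expanded by hand for the EQE ring it is equivalent to the fragment below; the `off += QMC_ALIGN(...)` step is elided in the listing above and assumed here:

	size_t off = 0;

	qm->eqe = qm->qdma.va + off;		/* EQE ring at the start of the buffer */
	qm->eqe_dma = qm->qdma.dma + off;	/* matching device-visible address */
	off += QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth);
	/* aeqe, sqc and cqc follow at the next aligned offsets */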
5565 ret = hisi_qm_alloc_rsv_buf(qm); in hisi_qm_memory_init()
5569 ret = hisi_qp_alloc_memory(qm); in hisi_qm_memory_init()
5576 hisi_qm_free_rsv_buf(qm); in hisi_qm_memory_init()
5578 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_init()
5580 idr_destroy(&qm->qp_idr); in hisi_qm_memory_init()
5581 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_init()
5582 kfree(qm->factor); in hisi_qm_memory_init()
5588 * hisi_qm_init() - Initialize the configuration of a qm.
5589 * @qm: The qm to be initialized.
5591 * This function initializes the qm; hisi_qm_start() can then be called to put the qm to work.
5593 int hisi_qm_init(struct hisi_qm *qm) in hisi_qm_init() argument
5595 struct pci_dev *pdev = qm->pdev; in hisi_qm_init()
5599 ret = hisi_qm_pci_init(qm); in hisi_qm_init()
5603 ret = qm_irqs_register(qm); in hisi_qm_init()
5607 if (qm->fun_type == QM_HW_PF) { in hisi_qm_init()
5609 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in hisi_qm_init()
5610 qm_disable_clock_gate(qm); in hisi_qm_init()
5611 ret = qm_dev_mem_reset(qm); in hisi_qm_init()
5618 if (qm->mode == UACCE_MODE_SVA) { in hisi_qm_init()
5619 ret = qm_alloc_uacce(qm); in hisi_qm_init()
5624 ret = hisi_qm_memory_init(qm); in hisi_qm_init()
5628 ret = hisi_qm_init_work(qm); in hisi_qm_init()
5632 qm_cmd_init(qm); in hisi_qm_init()
5637 hisi_qm_memory_uninit(qm); in hisi_qm_init()
5639 qm_remove_uacce(qm); in hisi_qm_init()
5641 qm_irqs_unregister(qm); in hisi_qm_init()
5643 hisi_qm_pci_uninit(qm); in hisi_qm_init()
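A probe-time usage sketch: hisi_qm_init() brings the qm up, hisi_qm_start() puts it to work, and hisi_qm_uninit() unwinds on failure. The per-device fields (dev_name, fun_type, sqe_size, ...) are assumed to be filled from the driver's configuration beforehand:

static int example_acc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	int ret;

	qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
	if (!qm)
		return -ENOMEM;

	qm->pdev = pdev;
	/* dev_name, fun_type, sqe_size etc. set from driver config here */

	ret = hisi_qm_init(qm);
	if (ret)
		return ret;

	ret = hisi_qm_start(qm);
	if (ret)
		hisi_qm_uninit(qm);

	return ret;
}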
5650 * @qm: pointer to accelerator device.
5657 int hisi_qm_get_dfx_access(struct hisi_qm *qm) in hisi_qm_get_dfx_access() argument
5659 struct device *dev = &qm->pdev->dev; in hisi_qm_get_dfx_access()
5666 return qm_pm_get_sync(qm); in hisi_qm_get_dfx_access()
5672 * @qm: pointer to accelerator device.
5676 void hisi_qm_put_dfx_access(struct hisi_qm *qm) in hisi_qm_put_dfx_access() argument
5678 qm_pm_put_sync(qm); in hisi_qm_put_dfx_access()
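Every debugfs reader pairs these two calls around its register access so a runtime-suspended device is resumed first. A hedged sketch of that pattern:

static ssize_t example_dfx_show(struct hisi_qm *qm, char *buf, size_t len)
{
	ssize_t ret;

	ret = hisi_qm_get_dfx_access(qm);	/* may resume the device */
	if (ret)
		return ret;

	/* ... read debug registers and format them into buf ... */
	ret = 0;

	hisi_qm_put_dfx_access(qm);		/* drop the PM reference */

	return ret;
}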
5683 * hisi_qm_pm_init() - Initialize qm runtime PM.
5684 * @qm: pointer to accelerator device.
5686 * Function that initializes qm runtime PM.
5688 void hisi_qm_pm_init(struct hisi_qm *qm) in hisi_qm_pm_init() argument
5690 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_init()
5692 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_init()
5702 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5703 * @qm: pointer to accelerator device.
5705 * Function that uninitializes qm runtime PM.
5707 void hisi_qm_pm_uninit(struct hisi_qm *qm) in hisi_qm_pm_uninit() argument
5709 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_uninit()
5711 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_uninit()
5719 static int qm_prepare_for_suspend(struct hisi_qm *qm) in qm_prepare_for_suspend() argument
5721 struct pci_dev *pdev = qm->pdev; in qm_prepare_for_suspend()
5724 ret = qm->ops->set_msi(qm, false); in qm_prepare_for_suspend()
5730 ret = qm_master_ooo_check(qm); in qm_prepare_for_suspend()
5734 if (qm->err_ini->set_priv_status) { in qm_prepare_for_suspend()
5735 ret = qm->err_ini->set_priv_status(qm); in qm_prepare_for_suspend()
5740 ret = qm_set_pf_mse(qm, false); in qm_prepare_for_suspend()
5747 static int qm_rebuild_for_resume(struct hisi_qm *qm) in qm_rebuild_for_resume() argument
5749 struct pci_dev *pdev = qm->pdev; in qm_rebuild_for_resume()
5752 ret = qm_set_pf_mse(qm, true); in qm_rebuild_for_resume()
5758 ret = qm->ops->set_msi(qm, true); in qm_rebuild_for_resume()
5764 ret = qm_dev_hw_init(qm); in qm_rebuild_for_resume()
5770 qm_cmd_init(qm); in qm_rebuild_for_resume()
5771 hisi_qm_dev_err_init(qm); in qm_rebuild_for_resume()
5773 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in qm_rebuild_for_resume()
5774 qm_disable_clock_gate(qm); in qm_rebuild_for_resume()
5775 ret = qm_dev_mem_reset(qm); in qm_rebuild_for_resume()
5791 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_suspend() local
5796 ret = hisi_qm_stop(qm, QM_NORMAL); in hisi_qm_suspend()
5798 pci_err(pdev, "failed to stop qm(%d)\n", ret); in hisi_qm_suspend()
5802 ret = qm_prepare_for_suspend(qm); in hisi_qm_suspend()
5819 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_resume() local
5824 ret = qm_rebuild_for_resume(qm); in hisi_qm_resume()
5830 ret = hisi_qm_start(qm); in hisi_qm_resume()
5832 if (qm_check_dev_error(qm)) { in hisi_qm_resume()
5833 pci_info(pdev, "failed to start qm due to device error, device will be reset!\n"); in hisi_qm_resume()
5837 pci_err(pdev, "failed to start qm(%d)!\n", ret); in hisi_qm_resume()
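Drivers expose this suspend/resume pair through runtime-PM ops and point the PCI driver at them. A hedged wiring sketch (struct name illustrative):

static const struct dev_pm_ops example_acc_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

/* and in the pci_driver definition: .driver.pm = &example_acc_pm_ops */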