Lines matching full:qm (identifier search results, HiSilicon HPRE accelerator driver, hpre_main.c; each hit shows the source line, the matching code, and the enclosing function)
369 bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) in hpre_check_alg_support() argument
373 cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val; in hpre_check_alg_support()
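The fragment above is the whole check: algorithm support is one AND against a capability word cached at probe time. A minimal sketch of the same test, assuming alg arrives as a single BIT(n) flag:

    /* Sketch: test one algorithm flag against the cached capability
     * word; the HPRE_DRV_ALG_BITMAP indexing follows line 373 above. */
    static bool check_alg_support(struct hisi_qm *qm, u32 alg)
    {
            u32 cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;

            return !!(alg & cap_val);
    }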
382 struct hisi_qm *qm = s->private; in hpre_diff_regs_show() local
384 hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, in hpre_diff_regs_show()
469 static void hpre_config_pasid(struct hisi_qm *qm) in hpre_config_pasid() argument
473 if (qm->ver >= QM_HW_V3) in hpre_config_pasid()
476 val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); in hpre_config_pasid()
477 val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); in hpre_config_pasid()
478 if (qm->use_sva) { in hpre_config_pasid()
485 writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG); in hpre_config_pasid()
486 writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG); in hpre_config_pasid()
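hpre_config_pasid() is a classic read-modify-write pair: bail out on QM_HW_V3 and later (which handle PASID differently), read both user-config registers, flip the PASID-enable bits according to qm->use_sva, and write the values back with relaxed accessors. A sketch of that shape, with the bit position assumed for illustration:

    #define PASID_EN        BIT(9)          /* assumed bit position */

    static void config_pasid(struct hisi_qm *qm)
    {
            u32 val1, val2;

            if (qm->ver >= QM_HW_V3)
                    return;

            val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
            val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
            if (qm->use_sva) {
                    val1 |= PASID_EN;
                    val2 |= PASID_EN;
            } else {
                    val1 &= ~PASID_EN;
                    val2 &= ~PASID_EN;
            }
            writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
            writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
    }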
489 static int hpre_cfg_by_dsm(struct hisi_qm *qm) in hpre_cfg_by_dsm() argument
491 struct device *dev = &qm->pdev->dev; in hpre_cfg_by_dsm()
513 static int hpre_set_cluster(struct hisi_qm *qm) in hpre_set_cluster() argument
515 struct device *dev = &qm->pdev->dev; in hpre_set_cluster()
523 cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val; in hpre_set_cluster()
524 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; in hpre_set_cluster()
532 qm->io_base + offset + HPRE_CORE_ENB); in hpre_set_cluster()
533 writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG); in hpre_set_cluster()
534 ret = readl_relaxed_poll_timeout(qm->io_base + offset + in hpre_set_cluster()
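hpre_set_cluster() enables each cluster's cores, kicks initialization, then busy-waits on a status register via readl_relaxed_poll_timeout() from <linux/iopoll.h>. A minimal per-cluster sketch; the kick on line 533 is verbatim, while the status macro and poll bounds are assumptions:

    #include <linux/iopoll.h>

    #define POLL_DELAY_US           10      /* assumed */
    #define POLL_TIMEOUT_US         1000    /* assumed */

    static int wait_cluster_ready(struct hisi_qm *qm, unsigned long offset,
                                  u32 core_mask)
    {
            u32 val;

            writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
            return readl_relaxed_poll_timeout(qm->io_base + offset +
                                              HPRE_CORE_INI_STATUS, /* assumed name */
                                              val,
                                              (val & core_mask) == core_mask,
                                              POLL_DELAY_US, POLL_TIMEOUT_US);
    }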
555 static void disable_flr_of_bme(struct hisi_qm *qm) in disable_flr_of_bme() argument
559 val = readl(qm->io_base + QM_PEH_AXUSER_CFG); in disable_flr_of_bme()
562 writel(val, qm->io_base + QM_PEH_AXUSER_CFG); in disable_flr_of_bme()
563 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); in disable_flr_of_bme()
566 static void hpre_open_sva_prefetch(struct hisi_qm *qm) in hpre_open_sva_prefetch() argument
571 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in hpre_open_sva_prefetch()
575 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); in hpre_open_sva_prefetch()
577 writel(val, qm->io_base + HPRE_PREFETCH_CFG); in hpre_open_sva_prefetch()
579 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG, in hpre_open_sva_prefetch()
584 pci_err(qm->pdev, "failed to open sva prefetch\n"); in hpre_open_sva_prefetch()
587 static void hpre_close_sva_prefetch(struct hisi_qm *qm) in hpre_close_sva_prefetch() argument
592 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in hpre_close_sva_prefetch()
595 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); in hpre_close_sva_prefetch()
597 writel(val, qm->io_base + HPRE_PREFETCH_CFG); in hpre_close_sva_prefetch()
599 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX, in hpre_close_sva_prefetch()
604 pci_err(qm->pdev, "failed to close sva prefetch\n"); in hpre_close_sva_prefetch()
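Open and close SVA prefetch are mirror routines: both return early unless the QM_SUPPORT_SVA_PREFETCH capability bit is set, both read-modify-write HPRE_PREFETCH_CFG, and both poll a status register, logging on timeout instead of failing the caller. The close side, sketched with the disable and busy bits assumed:

    static void close_sva_prefetch(struct hisi_qm *qm)
    {
            u32 val;
            int ret;

            if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
                    return;

            val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
            val |= PREFETCH_DISABLE;        /* assumed mask */
            writel(val, qm->io_base + HPRE_PREFETCH_CFG);

            ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
                                             val,
                                             !(val & PREFETCH_BUSY), /* assumed */
                                             POLL_DELAY_US, POLL_TIMEOUT_US);
            if (ret)
                    pci_err(qm->pdev, "failed to close sva prefetch\n");
    }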
607 static void hpre_enable_clock_gate(struct hisi_qm *qm) in hpre_enable_clock_gate() argument
614 if (qm->ver < QM_HW_V3) in hpre_enable_clock_gate()
617 val = readl(qm->io_base + HPRE_CLKGATE_CTL); in hpre_enable_clock_gate()
619 writel(val, qm->io_base + HPRE_CLKGATE_CTL); in hpre_enable_clock_gate()
621 val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE); in hpre_enable_clock_gate()
623 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE); in hpre_enable_clock_gate()
625 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; in hpre_enable_clock_gate()
630 val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL); in hpre_enable_clock_gate()
632 writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL); in hpre_enable_clock_gate()
634 val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG); in hpre_enable_clock_gate()
636 writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG); in hpre_enable_clock_gate()
640 static void hpre_disable_clock_gate(struct hisi_qm *qm) in hpre_disable_clock_gate() argument
647 if (qm->ver < QM_HW_V3) in hpre_disable_clock_gate()
650 val = readl(qm->io_base + HPRE_CLKGATE_CTL); in hpre_disable_clock_gate()
652 writel(val, qm->io_base + HPRE_CLKGATE_CTL); in hpre_disable_clock_gate()
654 val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE); in hpre_disable_clock_gate()
656 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE); in hpre_disable_clock_gate()
658 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; in hpre_disable_clock_gate()
663 val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL); in hpre_disable_clock_gate()
665 writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL); in hpre_disable_clock_gate()
667 val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG); in hpre_disable_clock_gate()
669 writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG); in hpre_disable_clock_gate()
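hpre_enable_clock_gate() and hpre_disable_clock_gate() are again mirrors: skip pre-V3 hardware, toggle a global gating control plus the PEH auto-gate, then walk every cluster (the count comes from the HPRE_CORE_INFO capability word) toggling per-cluster dynamic-gating and shadow-config bits. The enable side condensed, with bit masks, field decoding, and the cluster stride all assumed:

    static void enable_clock_gate(struct hisi_qm *qm)
    {
            u32 val, core_info, clusters;
            unsigned long offset;
            int i;

            if (qm->ver < QM_HW_V3)
                    return;

            val = readl(qm->io_base + HPRE_CLKGATE_CTL);
            val |= GATE_EN;                         /* assumed mask */
            writel(val, qm->io_base + HPRE_CLKGATE_CTL);

            val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
            val |= AUTO_GATE_EN;                    /* assumed mask */
            writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

            core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
            clusters = core_info & 0xff;            /* assumed field layout */
            for (i = 0; i < clusters; i++) {
                    offset = (unsigned long)i * CLSTR_STRIDE; /* assumed stride */
                    val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
                    writel(val | DYN_GATE_EN,       /* assumed mask */
                           qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
                    val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
                    writel(val | SHB_GATE_EN,       /* assumed mask */
                           qm->io_base + offset + HPRE_CORE_SHB_CFG);
            }
    }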
673 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) in hpre_set_user_domain_and_cache() argument
675 struct device *dev = &qm->pdev->dev; in hpre_set_user_domain_and_cache()
680 hpre_disable_clock_gate(qm); in hpre_set_user_domain_and_cache()
682 writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE); in hpre_set_user_domain_and_cache()
683 writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE); in hpre_set_user_domain_and_cache()
684 writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG); in hpre_set_user_domain_and_cache()
686 if (qm->ver >= QM_HW_V3) in hpre_set_user_domain_and_cache()
688 qm->io_base + HPRE_TYPES_ENB); in hpre_set_user_domain_and_cache()
690 writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB); in hpre_set_user_domain_and_cache()
692 writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); in hpre_set_user_domain_and_cache()
693 writel(0x0, qm->io_base + HPRE_BD_ENDIAN); in hpre_set_user_domain_and_cache()
694 writel(0x0, qm->io_base + HPRE_POISON_BYPASS); in hpre_set_user_domain_and_cache()
695 writel(0x0, qm->io_base + HPRE_ECC_BYPASS); in hpre_set_user_domain_and_cache()
697 writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); in hpre_set_user_domain_and_cache()
698 writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG); in hpre_set_user_domain_and_cache()
699 writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG); in hpre_set_user_domain_and_cache()
700 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val, in hpre_set_user_domain_and_cache()
709 ret = hpre_set_cluster(qm); in hpre_set_user_domain_and_cache()
714 if (qm->ver == QM_HW_V2) { in hpre_set_user_domain_and_cache()
715 ret = hpre_cfg_by_dsm(qm); in hpre_set_user_domain_and_cache()
719 disable_flr_of_bme(qm); in hpre_set_user_domain_and_cache()
723 hpre_config_pasid(qm); in hpre_set_user_domain_and_cache()
725 hpre_enable_clock_gate(qm); in hpre_set_user_domain_and_cache()
730 static void hpre_cnt_regs_clear(struct hisi_qm *qm) in hpre_cnt_regs_clear() argument
738 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; in hpre_cnt_regs_clear()
743 writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY); in hpre_cnt_regs_clear()
747 writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE); in hpre_cnt_regs_clear()
749 hisi_qm_debug_regs_clear(qm); in hpre_cnt_regs_clear()
752 static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable) in hpre_master_ooo_ctrl() argument
756 val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_master_ooo_ctrl()
759 val2 = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_master_ooo_ctrl()
760 HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); in hpre_master_ooo_ctrl()
766 if (qm->ver > QM_HW_V2) in hpre_master_ooo_ctrl()
767 writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL); in hpre_master_ooo_ctrl()
769 writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_master_ooo_ctrl()
772 static void hpre_hw_error_disable(struct hisi_qm *qm) in hpre_hw_error_disable() argument
776 ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); in hpre_hw_error_disable()
777 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); in hpre_hw_error_disable()
780 writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK); in hpre_hw_error_disable()
782 hpre_master_ooo_ctrl(qm, false); in hpre_hw_error_disable()
785 static void hpre_hw_error_enable(struct hisi_qm *qm) in hpre_hw_error_enable() argument
789 ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); in hpre_hw_error_enable()
790 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); in hpre_hw_error_enable()
793 writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT); in hpre_hw_error_enable()
796 writel(ce, qm->io_base + HPRE_RAS_CE_ENB); in hpre_hw_error_enable()
797 writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); in hpre_hw_error_enable()
798 writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); in hpre_hw_error_enable()
801 hpre_master_ooo_ctrl(qm, true); in hpre_hw_error_enable()
805 writel(~err_en, qm->io_base + HPRE_INT_MASK); in hpre_hw_error_enable()
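The enable path follows a fixed recipe: fetch the CE/NFE masks from the capability table for this silicon revision, ack stale error sources, program the per-class CE/NFE/FE routing registers, arm the master-OOO shutdown, and finally unmask. The last write is the subtle part: HPRE_INT_MASK is an active-high mask register, so the enabled set is written inverted (line 805). Assembled from the fragments above:

    static void hw_error_enable(struct hisi_qm *qm)
    {
            u32 ce, nfe, err_en;

            ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP,
                                     qm->cap_ver);
            nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP,
                                      qm->cap_ver);

            /* ack any latched sources before unmasking */
            writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE,
                   qm->io_base + HPRE_HAC_SOURCE_INT);

            /* route each class: correctable, non-fatal, fatal */
            writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
            writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
            writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

            hpre_master_ooo_ctrl(qm, true);

            /* HPRE_INT_MASK is active-high: zero bits are unmasked */
            err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
            writel(~err_en, qm->io_base + HPRE_INT_MASK);
    }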
812 return &hpre->qm; in hpre_file_to_qm()
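hpre_file_to_qm() recovers the owning struct hpre from a debugfs file handle and returns the embedded qm; line 984 shows the inverse hop via container_of(). Both directions sketched, with the file-handle layout assumed:

    struct hpre_debugfs_file {
            int index;
            struct hpre *hpre;              /* assumed back-pointer */
    };

    static struct hisi_qm *file_to_qm(struct hpre_debugfs_file *file)
    {
            return &file->hpre->qm;         /* qm is embedded in struct hpre */
    }

    static struct hpre *qm_to_hpre(struct hisi_qm *qm)
    {
            return container_of(qm, struct hpre, qm);
    }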
817 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_clear_enable_read() local
819 return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & in hpre_clear_enable_read()
825 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_clear_enable_write() local
831 tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & in hpre_clear_enable_write()
833 writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE); in hpre_clear_enable_write()
840 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_cluster_inqry_read() local
845 return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); in hpre_cluster_inqry_read()
850 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_cluster_inqry_write() local
855 writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); in hpre_cluster_inqry_write()
862 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_ctrl_debug_read() local
867 ret = hisi_qm_get_dfx_access(qm); in hpre_ctrl_debug_read()
884 hisi_qm_put_dfx_access(qm); in hpre_ctrl_debug_read()
890 hisi_qm_put_dfx_access(qm); in hpre_ctrl_debug_read()
898 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_ctrl_debug_write() local
918 ret = hisi_qm_get_dfx_access(qm); in hpre_ctrl_debug_write()
941 hisi_qm_put_dfx_access(qm); in hpre_ctrl_debug_write()
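Both debugfs handlers bracket their register traffic between hisi_qm_get_dfx_access() and hisi_qm_put_dfx_access(), so the device is runtime-resumed before any MMIO and released on every exit path (lines 884, 890, 941). The bracket, sketched around a hypothetical helper:

    static ssize_t ctrl_debug_read(struct hisi_qm *qm, char *buf, size_t len)
    {
            ssize_t ret;

            ret = hisi_qm_get_dfx_access(qm);       /* resume if PM-suspended */
            if (ret)
                    return ret;

            ret = do_register_dump(qm, buf, len);   /* hypothetical helper */

            hisi_qm_put_dfx_access(qm);             /* drop the PM reference */
            return ret;
    }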
981 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, in hpre_create_debugfs_file() argument
984 struct hpre *hpre = container_of(qm, struct hpre, qm); in hpre_create_debugfs_file()
991 file_dir = qm->debug.debug_root; in hpre_create_debugfs_file()
1006 static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) in hpre_pf_comm_regs_debugfs_init() argument
1008 struct device *dev = &qm->pdev->dev; in hpre_pf_comm_regs_debugfs_init()
1017 regset->base = qm->io_base; in hpre_pf_comm_regs_debugfs_init()
1020 debugfs_create_file("regs", 0444, qm->debug.debug_root, in hpre_pf_comm_regs_debugfs_init()
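hpre_pf_comm_regs_debugfs_init() builds a struct debugfs_regset32 over the PF's common registers: devm-allocate the regset, point it at a {name, offset} table and the mapped base, then expose a single "regs" file. The driver wires its own fops on line 1020; the stock debugfs_create_regset32() helper shown here is equivalent in spirit, and the register table below is hypothetical:

    #include <linux/debugfs.h>

    static const struct debugfs_reg32 comm_regs[] = {
            { "CORE_INT_STATUS",  0x3000 },         /* illustrative offsets */
            { "OOO_SHUTDOWN_SEL", 0x3010 },
    };

    static int comm_regs_debugfs_init(struct hisi_qm *qm)
    {
            struct debugfs_regset32 *regset;

            regset = devm_kzalloc(&qm->pdev->dev, sizeof(*regset), GFP_KERNEL);
            if (!regset)
                    return -ENOMEM;

            regset->regs = comm_regs;
            regset->nregs = ARRAY_SIZE(comm_regs);
            regset->base = qm->io_base;

            debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
            return 0;
    }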
1026 static int hpre_cluster_debugfs_init(struct hisi_qm *qm) in hpre_cluster_debugfs_init() argument
1028 struct device *dev = &qm->pdev->dev; in hpre_cluster_debugfs_init()
1036 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; in hpre_cluster_debugfs_init()
1043 tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); in hpre_cluster_debugfs_init()
1051 regset->base = qm->io_base + hpre_cluster_offsets[i]; in hpre_cluster_debugfs_init()
1056 ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL, in hpre_cluster_debugfs_init()
1065 static int hpre_ctrl_debug_init(struct hisi_qm *qm) in hpre_ctrl_debug_init() argument
1069 ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, in hpre_ctrl_debug_init()
1074 ret = hpre_pf_comm_regs_debugfs_init(qm); in hpre_ctrl_debug_init()
1078 return hpre_cluster_debugfs_init(qm); in hpre_ctrl_debug_init()
1083 struct hisi_qm *qm = s->private; in hpre_cap_regs_show() local
1086 size = qm->cap_tables.qm_cap_size; in hpre_cap_regs_show()
1088 seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name, in hpre_cap_regs_show()
1089 qm->cap_tables.qm_cap_table[i].cap_val); in hpre_cap_regs_show()
1091 size = qm->cap_tables.dev_cap_size; in hpre_cap_regs_show()
1093 seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name, in hpre_cap_regs_show()
1094 qm->cap_tables.dev_cap_table[i].cap_val); in hpre_cap_regs_show()
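hpre_cap_regs_show() is a plain seq_file show routine: s->private carries the qm (line 1083), and the two loops dump the QM-level and device-level capability tables. Wrapping it with DEFINE_SHOW_ATTRIBUTE() yields the fops used on line 1121; reconstructed from the fragments:

    #include <linux/seq_file.h>

    static int cap_regs_show(struct seq_file *s, void *unused)
    {
            struct hisi_qm *qm = s->private;
            u32 i, size;

            size = qm->cap_tables.qm_cap_size;
            for (i = 0; i < size; i++)
                    seq_printf(s, "%s= 0x%08x\n",
                               qm->cap_tables.qm_cap_table[i].name,
                               qm->cap_tables.qm_cap_table[i].cap_val);

            size = qm->cap_tables.dev_cap_size;
            for (i = 0; i < size; i++)
                    seq_printf(s, "%s= 0x%08x\n",
                               qm->cap_tables.dev_cap_table[i].name,
                               qm->cap_tables.dev_cap_table[i].cap_val);
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(cap_regs);    /* generates cap_regs_fops */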
1101 static void hpre_dfx_debug_init(struct hisi_qm *qm) in hpre_dfx_debug_init() argument
1103 struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs; in hpre_dfx_debug_init()
1104 struct hpre *hpre = container_of(qm, struct hpre, qm); in hpre_dfx_debug_init()
1109 parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root); in hpre_dfx_debug_init()
1116 if (qm->fun_type == QM_HW_PF && hpre_regs) in hpre_dfx_debug_init()
1118 qm, &hpre_diff_regs_fops); in hpre_dfx_debug_init()
1121 qm->debug.debug_root, qm, &hpre_cap_regs_fops); in hpre_dfx_debug_init()
1124 static int hpre_debugfs_init(struct hisi_qm *qm) in hpre_debugfs_init() argument
1126 struct device *dev = &qm->pdev->dev; in hpre_debugfs_init()
1129 ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs)); in hpre_debugfs_init()
1135 qm->debug.debug_root = debugfs_create_dir(dev_name(dev), in hpre_debugfs_init()
1137 qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; in hpre_debugfs_init()
1138 qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; in hpre_debugfs_init()
1140 hisi_qm_debug_init(qm); in hpre_debugfs_init()
1142 if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) { in hpre_debugfs_init()
1143 ret = hpre_ctrl_debug_init(qm); in hpre_debugfs_init()
1148 hpre_dfx_debug_init(qm); in hpre_debugfs_init()
1153 debugfs_remove_recursive(qm->debug.debug_root); in hpre_debugfs_init()
1154 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); in hpre_debugfs_init()
1158 static void hpre_debugfs_exit(struct hisi_qm *qm) in hpre_debugfs_exit() argument
1160 debugfs_remove_recursive(qm->debug.debug_root); in hpre_debugfs_exit()
1162 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); in hpre_debugfs_exit()
1165 static int hpre_pre_store_cap_reg(struct hisi_qm *qm) in hpre_pre_store_cap_reg() argument
1168 struct device *dev = &qm->pdev->dev; in hpre_pre_store_cap_reg()
1181 hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info, in hpre_pre_store_cap_reg()
1182 i, qm->cap_ver); in hpre_pre_store_cap_reg()
1194 qm->cap_tables.dev_cap_table = hpre_cap; in hpre_pre_store_cap_reg()
1195 qm->cap_tables.dev_cap_size = size; in hpre_pre_store_cap_reg()
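hpre_pre_store_cap_reg() snapshots every capability once at probe into a devm-allocated table, so later code (lines 373, 523-524, 1241) indexes qm->cap_tables.dev_cap_table instead of re-querying hardware. A sketch, assuming the record layout implied by the cap_val accesses:

    static int pre_store_caps(struct hisi_qm *qm, u32 size)
    {
            struct hisi_qm_cap_record *hpre_cap;    /* layout assumed */
            u32 i;

            hpre_cap = devm_kcalloc(&qm->pdev->dev, size, sizeof(*hpre_cap),
                                    GFP_KERNEL);
            if (!hpre_cap)
                    return -ENOMEM;

            for (i = 0; i < size; i++)
                    hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm,
                                    hpre_cap_query_info, i, qm->cap_ver);

            qm->cap_tables.dev_cap_table = hpre_cap;
            qm->cap_tables.dev_cap_size = size;
            return 0;
    }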
1200 static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) in hpre_qm_init() argument
1210 qm->mode = uacce_mode; in hpre_qm_init()
1211 qm->pdev = pdev; in hpre_qm_init()
1212 qm->sqe_size = HPRE_SQE_SIZE; in hpre_qm_init()
1213 qm->dev_name = hpre_name; in hpre_qm_init()
1215 qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ? in hpre_qm_init()
1217 if (qm->fun_type == QM_HW_PF) { in hpre_qm_init()
1218 qm->qp_base = HPRE_PF_DEF_Q_BASE; in hpre_qm_init()
1219 qm->qp_num = pf_q_num; in hpre_qm_init()
1220 qm->debug.curr_qm_qp_num = pf_q_num; in hpre_qm_init()
1221 qm->qm_list = &hpre_devices; in hpre_qm_init()
1222 qm->err_ini = &hpre_err_ini; in hpre_qm_init()
1224 set_bit(QM_MODULE_PARAM, &qm->misc_ctl); in hpre_qm_init()
1227 ret = hisi_qm_init(qm); in hpre_qm_init()
1229 pci_err(pdev, "Failed to init hpre qm configures!\n"); in hpre_qm_init()
1234 ret = hpre_pre_store_cap_reg(qm); in hpre_qm_init()
1237 hisi_qm_uninit(qm); in hpre_qm_init()
1241 alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val; in hpre_qm_init()
1242 ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs)); in hpre_qm_init()
1245 hisi_qm_uninit(qm); in hpre_qm_init()
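hpre_qm_init() also fixes the unwind rule for everything after it: once hisi_qm_init() succeeds, any later failure (capability pre-store, line 1234; algorithm registration, line 1242) must call hisi_qm_uninit() before returning. The tail condensed into a goto ladder, with the size constant hypothetical:

    static int qm_init_tail(struct hisi_qm *qm)
    {
            u32 alg_msk;
            int ret;

            ret = pre_store_caps(qm, HPRE_CAP_SIZE);    /* hypothetical size */
            if (ret)
                    goto err_qm_uninit;

            alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
            ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs,
                                   ARRAY_SIZE(hpre_dev_algs));
            if (ret) {
                    pci_err(qm->pdev, "Failed to set hpre algs!\n");
                    goto err_qm_uninit;
            }
            return 0;

    err_qm_uninit:
            hisi_qm_uninit(qm);
            return ret;
    }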
1251 static int hpre_show_last_regs_init(struct hisi_qm *qm) in hpre_show_last_regs_init() argument
1255 struct qm_debug *debug = &qm->debug; in hpre_show_last_regs_init()
1261 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; in hpre_show_last_regs_init()
1270 debug->last_words[i] = readl_relaxed(qm->io_base + in hpre_show_last_regs_init()
1274 io_base = qm->io_base + hpre_cluster_offsets[i]; in hpre_show_last_regs_init()
1285 static void hpre_show_last_regs_uninit(struct hisi_qm *qm) in hpre_show_last_regs_uninit() argument
1287 struct qm_debug *debug = &qm->debug; in hpre_show_last_regs_uninit()
1289 if (qm->fun_type == QM_HW_VF || !debug->last_words) in hpre_show_last_regs_uninit()
1296 static void hpre_show_last_dfx_regs(struct hisi_qm *qm) in hpre_show_last_dfx_regs() argument
1300 struct qm_debug *debug = &qm->debug; in hpre_show_last_dfx_regs()
1301 struct pci_dev *pdev = qm->pdev; in hpre_show_last_dfx_regs()
1308 if (qm->fun_type == QM_HW_VF || !debug->last_words) in hpre_show_last_dfx_regs()
1313 val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset); in hpre_show_last_dfx_regs()
1319 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; in hpre_show_last_dfx_regs()
1323 io_base = qm->io_base + hpre_cluster_offsets[i]; in hpre_show_last_dfx_regs()
1335 static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) in hpre_log_hw_error() argument
1338 struct device *dev = &qm->pdev->dev; in hpre_log_hw_error()
1348 static u32 hpre_get_hw_err_status(struct hisi_qm *qm) in hpre_get_hw_err_status() argument
1350 return readl(qm->io_base + HPRE_INT_STATUS); in hpre_get_hw_err_status()
1353 static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) in hpre_clear_hw_err_status() argument
1355 writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); in hpre_clear_hw_err_status()
1358 static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type) in hpre_disable_error_report() argument
1362 nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); in hpre_disable_error_report()
1363 writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB); in hpre_disable_error_report()
1366 static void hpre_open_axi_master_ooo(struct hisi_qm *qm) in hpre_open_axi_master_ooo() argument
1370 value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_open_axi_master_ooo()
1372 qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_open_axi_master_ooo()
1374 qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_open_axi_master_ooo()
1377 static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm) in hpre_get_err_result() argument
1381 err_status = hpre_get_hw_err_status(qm); in hpre_get_err_result()
1383 if (err_status & qm->err_info.ecc_2bits_mask) in hpre_get_err_result()
1384 qm->err_status.is_dev_ecc_mbit = true; in hpre_get_err_result()
1385 hpre_log_hw_error(qm, err_status); in hpre_get_err_result()
1387 if (err_status & qm->err_info.dev_reset_mask) { in hpre_get_err_result()
1389 hpre_disable_error_report(qm, err_status); in hpre_get_err_result()
1392 hpre_clear_hw_err_status(qm, err_status); in hpre_get_err_result()
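hpre_get_err_result() triages the raw status word: remember a multi-bit ECC hit, log everything, and if any bit is in dev_reset_mask, first mute those bits in HPRE_RAS_NFE_ENB (line 1363, so the reset path is not re-interrupted) and report that a reset is needed; otherwise ack the sources and carry on. Reassembled from the fragments, with the acc_err_result values taken from the hisi_qm framework:

    static enum acc_err_result get_err_result(struct hisi_qm *qm)
    {
            u32 err_status = hpre_get_hw_err_status(qm);

            if (err_status) {
                    if (err_status & qm->err_info.ecc_2bits_mask)
                            qm->err_status.is_dev_ecc_mbit = true;
                    hpre_log_hw_error(qm, err_status);

                    if (err_status & qm->err_info.dev_reset_mask) {
                            /* stop re-reporting reset-class errors */
                            hpre_disable_error_report(qm, err_status);
                            return ACC_ERR_NEED_RESET;
                    }
                    hpre_clear_hw_err_status(qm, err_status);
            }
            return ACC_ERR_NONE;
    }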
1398 static bool hpre_dev_is_abnormal(struct hisi_qm *qm) in hpre_dev_is_abnormal() argument
1402 err_status = hpre_get_hw_err_status(qm); in hpre_dev_is_abnormal()
1403 if (err_status & qm->err_info.dev_shutdown_mask) in hpre_dev_is_abnormal()
1409 static void hpre_err_info_init(struct hisi_qm *qm) in hpre_err_info_init() argument
1411 struct hisi_qm_err_info *err_info = &qm->err_info; in hpre_err_info_init()
1414 err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1415 err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1417 err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_err_info_init()
1418 HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1419 err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_err_info_init()
1420 HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1421 err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_err_info_init()
1422 HPRE_QM_RESET_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1423 err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_err_info_init()
1424 HPRE_RESET_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1446 struct hisi_qm *qm = &hpre->qm; in hpre_pf_probe_init() local
1449 ret = hpre_set_user_domain_and_cache(qm); in hpre_pf_probe_init()
1453 hpre_open_sva_prefetch(qm); in hpre_pf_probe_init()
1455 hisi_qm_dev_err_init(qm); in hpre_pf_probe_init()
1456 ret = hpre_show_last_regs_init(qm); in hpre_pf_probe_init()
1458 pci_err(qm->pdev, "Failed to init last word regs!\n"); in hpre_pf_probe_init()
1466 struct hisi_qm *qm = &hpre->qm; in hpre_probe_init() local
1469 if (qm->fun_type == QM_HW_PF) { in hpre_probe_init()
1474 if (qm->ver >= QM_HW_V3) { in hpre_probe_init()
1476 qm->type_rate = type_rate; in hpre_probe_init()
1483 static void hpre_probe_uninit(struct hisi_qm *qm) in hpre_probe_uninit() argument
1485 if (qm->fun_type == QM_HW_VF) in hpre_probe_uninit()
1488 hpre_cnt_regs_clear(qm); in hpre_probe_uninit()
1489 qm->debug.curr_qm_qp_num = 0; in hpre_probe_uninit()
1490 hpre_show_last_regs_uninit(qm); in hpre_probe_uninit()
1491 hpre_close_sva_prefetch(qm); in hpre_probe_uninit()
1492 hisi_qm_dev_err_uninit(qm); in hpre_probe_uninit()
1497 struct hisi_qm *qm; in hpre_probe() local
1505 qm = &hpre->qm; in hpre_probe()
1506 ret = hpre_qm_init(qm, pdev); in hpre_probe()
1508 pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret); in hpre_probe()
1518 ret = hisi_qm_start(qm); in hpre_probe()
1522 ret = hpre_debugfs_init(qm); in hpre_probe()
1526 hisi_qm_add_list(qm, &hpre_devices); in hpre_probe()
1527 ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); in hpre_probe()
1533 if (qm->uacce) { in hpre_probe()
1534 ret = uacce_register(qm->uacce); in hpre_probe()
1541 if (qm->fun_type == QM_HW_PF && vfs_num) { in hpre_probe()
1547 hisi_qm_pm_init(qm); in hpre_probe()
1552 hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); in hpre_probe()
1555 hisi_qm_del_list(qm, &hpre_devices); in hpre_probe()
1556 hpre_debugfs_exit(qm); in hpre_probe()
1557 hisi_qm_stop(qm, QM_NORMAL); in hpre_probe()
1560 hpre_probe_uninit(qm); in hpre_probe()
1563 hisi_qm_uninit(qm); in hpre_probe()
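hpre_probe()'s error ladder (lines 1552-1563) unwinds in strict reverse order: alg unregister, device-list removal, debugfs teardown, qm stop, probe-uninit, qm uninit; hpre_remove() below walks the same order. The core of the ladder, condensed (debugfs, uacce, and SR-IOV steps omitted; the label names are assumed):

    static int probe_tail(struct hisi_qm *qm)
    {
            int ret;

            ret = hisi_qm_start(qm);
            if (ret)
                    goto err_probe_uninit;

            hisi_qm_add_list(qm, &hpre_devices);
            ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
            if (ret)
                    goto err_del_list;

            hisi_qm_pm_init(qm);
            return 0;

    err_del_list:
            hisi_qm_del_list(qm, &hpre_devices);
            hisi_qm_stop(qm, QM_NORMAL);
    err_probe_uninit:
            hpre_probe_uninit(qm);
            return ret;
    }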
1570 struct hisi_qm *qm = pci_get_drvdata(pdev); in hpre_remove() local
1572 hisi_qm_pm_uninit(qm); in hpre_remove()
1573 hisi_qm_wait_task_finish(qm, &hpre_devices); in hpre_remove()
1574 hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); in hpre_remove()
1575 hisi_qm_del_list(qm, &hpre_devices); in hpre_remove()
1576 if (qm->fun_type == QM_HW_PF && qm->vfs_num) in hpre_remove()
1579 hpre_debugfs_exit(qm); in hpre_remove()
1580 hisi_qm_stop(qm, QM_NORMAL); in hpre_remove()
1582 hpre_probe_uninit(qm); in hpre_remove()
1583 hisi_qm_uninit(qm); in hpre_remove()