Diffstat (limited to 'drivers/crypto/hisilicon')
-rw-r--r--  drivers/crypto/hisilicon/debugfs.c          |   1
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c   | 179
-rw-r--r--  drivers/crypto/hisilicon/qm.c               | 218
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.c      |   3
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c  |   8
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c    | 229
-rw-r--r--  drivers/crypto/hisilicon/zip/dae_main.c     |  19
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c     | 234
8 files changed, 643 insertions, 248 deletions
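Three of the drivers touched below (hpre, sec2 and zip) gain near-identical wait_sva_ready() helpers that poll a prefetch status register every 10-20us and only declare the SVA module idle after three consecutive zero reads. A minimal sketch of that shared pattern follows; the helper name and parameter list are illustrative only (each driver hard-codes its own register offset, busy mask and retry limit), not something this series itself adds:

/*
 * Illustrative consolidation of hpre_wait_sva_ready(), sec_wait_sva_ready()
 * and hisi_zip_wait_sva_ready() from the hunks below. The function name and
 * parameters are hypothetical; the polling logic mirrors the patch.
 */
static int acc_wait_sva_idle(struct hisi_qm *qm, u32 offset, u32 busy_mask,
			     u32 max_tries, u8 zero_reads_needed)
{
	u32 try_times = 0;
	u8 count = 0;

	do {
		/* Any busy indication resets the run of idle samples. */
		if (readl(qm->io_base + offset) & busy_mask)
			count = 0;
		else if (++count == zero_reads_needed)
			return 0;

		usleep_range(10, 20);
	} while (++try_times < max_tries);

	pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
	return -ETIMEDOUT;
}

Requiring several consecutive idle samples, rather than a single zero read, avoids declaring the module ready while it merely sits in a brief gap between outstanding prefetch transactions.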
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 45e130b901eb..17eb236e9ee4 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm, dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); ret = PTR_ERR(qm->debug.acc_diff_regs); qm->debug.acc_diff_regs = NULL; + qm->debug.qm_diff_regs = NULL; return ret; } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index f5b47e5ff48a..b94fecd765ee 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -39,6 +39,7 @@ #define HPRE_HAC_RAS_NFE_ENB 0x301414 #define HPRE_HAC_RAS_FE_ENB 0x301418 #define HPRE_HAC_INT_SET 0x301500 +#define HPRE_AXI_ERROR_MASK GENMASK(21, 10) #define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 #define HPRE_RDCHN_INI_ST 0x301a00 @@ -78,6 +79,11 @@ #define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30))) #define HPRE_PREFETCH_DISABLE BIT(30) #define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8)) +#define HPRE_SVA_PREFTCH_DFX4 0x301144 +#define HPRE_WAIT_SVA_READY 500000 +#define HPRE_READ_SVA_STATUS_TIMES 3 +#define HPRE_WAIT_US_MIN 10 +#define HPRE_WAIT_US_MAX 20 /* clock gate */ #define HPRE_CLKGATE_CTL 0x301a10 @@ -466,6 +472,33 @@ struct hisi_qp *hpre_create_qp(u8 type) return NULL; } +static int hpre_wait_sva_ready(struct hisi_qm *qm) +{ + u32 val, try_times = 0; + u8 count = 0; + + /* + * Read the register value every 10-20us. If the value is 0 for three + * consecutive times, the SVA module is ready. + */ + do { + val = readl(qm->io_base + HPRE_SVA_PREFTCH_DFX4); + if (val) + count = 0; + else if (++count == HPRE_READ_SVA_STATUS_TIMES) + break; + + usleep_range(HPRE_WAIT_US_MIN, HPRE_WAIT_US_MAX); + } while (++try_times < HPRE_WAIT_SVA_READY); + + if (try_times == HPRE_WAIT_SVA_READY) { + pci_err(qm->pdev, "failed to wait sva prefetch ready\n"); + return -ETIMEDOUT; + } + + return 0; +} + static void hpre_config_pasid(struct hisi_qm *qm) { u32 val1, val2; @@ -563,7 +596,7 @@ static void disable_flr_of_bme(struct hisi_qm *qm) writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); } -static void hpre_open_sva_prefetch(struct hisi_qm *qm) +static void hpre_close_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; @@ -571,20 +604,21 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm) if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; - /* Enable prefetch */ val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); - val &= HPRE_PREFETCH_ENABLE; + val |= HPRE_PREFETCH_DISABLE; writel(val, qm->io_base + HPRE_PREFETCH_CFG); - ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG, - val, !(val & HPRE_PREFETCH_DISABLE), + ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX, + val, !(val & HPRE_SVA_DISABLE_READY), HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US); if (ret) - pci_err(qm->pdev, "failed to open sva prefetch\n"); + pci_err(qm->pdev, "failed to close sva prefetch\n"); + + (void)hpre_wait_sva_ready(qm); } -static void hpre_close_sva_prefetch(struct hisi_qm *qm) +static void hpre_open_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; @@ -592,16 +626,24 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm) if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; + /* Enable prefetch */ val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); - val |= HPRE_PREFETCH_DISABLE; + val &= HPRE_PREFETCH_ENABLE; 
writel(val, qm->io_base + HPRE_PREFETCH_CFG); - ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX, - val, !(val & HPRE_SVA_DISABLE_READY), + ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG, + val, !(val & HPRE_PREFETCH_DISABLE), HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US); + if (ret) { + pci_err(qm->pdev, "failed to open sva prefetch\n"); + hpre_close_sva_prefetch(qm); + return; + } + + ret = hpre_wait_sva_ready(qm); if (ret) - pci_err(qm->pdev, "failed to close sva prefetch\n"); + hpre_close_sva_prefetch(qm); } static void hpre_enable_clock_gate(struct hisi_qm *qm) @@ -721,6 +763,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) /* Config data buffer pasid needed by Kunpeng 920 */ hpre_config_pasid(qm); + hpre_open_sva_prefetch(qm); hpre_enable_clock_gate(qm); @@ -756,8 +799,7 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable) val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); if (enable) { val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE; - val2 = hisi_qm_get_hw_info(qm, hpre_basic_info, - HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + val2 = qm->err_info.dev_err.shutdown_mask; } else { val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE; val2 = 0x0; @@ -771,38 +813,33 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable) static void hpre_hw_error_disable(struct hisi_qm *qm) { - u32 ce, nfe; - - ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); - nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; /* disable hpre hw error interrupts */ - writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK); + writel(err_mask, qm->io_base + HPRE_INT_MASK); /* disable HPRE block master OOO when nfe occurs on Kunpeng930 */ hpre_master_ooo_ctrl(qm, false); } static void hpre_hw_error_enable(struct hisi_qm *qm) { - u32 ce, nfe, err_en; - - ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); - nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; /* clear HPRE hw error source if having */ - writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT); + writel(err_mask, qm->io_base + HPRE_HAC_SOURCE_INT); /* configure error type */ - writel(ce, qm->io_base + HPRE_RAS_CE_ENB); - writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); - writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); + writel(dev_err->ce, qm->io_base + HPRE_RAS_CE_ENB); + writel(dev_err->nfe, qm->io_base + HPRE_RAS_NFE_ENB); + writel(dev_err->fe, qm->io_base + HPRE_RAS_FE_ENB); /* enable HPRE block master OOO when nfe occurs on Kunpeng930 */ hpre_master_ooo_ctrl(qm, true); /* enable hpre hw error interrupts */ - err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE; - writel(~err_en, qm->io_base + HPRE_INT_MASK); + writel(~err_mask, qm->io_base + HPRE_INT_MASK); } static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) @@ -1171,7 +1208,7 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm) size_t i, size; size = ARRAY_SIZE(hpre_cap_query_info); - hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL); + hpre_cap = devm_kcalloc(dev, size, sizeof(*hpre_cap), GFP_KERNEL); if (!hpre_cap) return -ENOMEM; @@ -1357,12 +1394,20 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 
err_sts) static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type) { - u32 nfe_mask; + u32 nfe_mask = qm->err_info.dev_err.nfe; - nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB); } +static void hpre_enable_error_report(struct hisi_qm *qm) +{ + u32 nfe_mask = qm->err_info.dev_err.nfe; + u32 ce_mask = qm->err_info.dev_err.ce; + + writel(nfe_mask, qm->io_base + HPRE_RAS_NFE_ENB); + writel(ce_mask, qm->io_base + HPRE_RAS_CE_ENB); +} + static void hpre_open_axi_master_ooo(struct hisi_qm *qm) { u32 value; @@ -1380,16 +1425,18 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm) err_status = hpre_get_hw_err_status(qm); if (err_status) { - if (err_status & qm->err_info.ecc_2bits_mask) + if (err_status & qm->err_info.dev_err.ecc_2bits_mask) qm->err_status.is_dev_ecc_mbit = true; hpre_log_hw_error(qm, err_status); - if (err_status & qm->err_info.dev_reset_mask) { + if (err_status & qm->err_info.dev_err.reset_mask) { /* Disable the same error reporting until device is recovered. */ hpre_disable_error_report(qm, err_status); return ACC_ERR_NEED_RESET; } hpre_clear_hw_err_status(qm, err_status); + /* Avoid firmware disable error report, re-enable. */ + hpre_enable_error_report(qm); } return ACC_ERR_RECOVERED; @@ -1400,28 +1447,64 @@ static bool hpre_dev_is_abnormal(struct hisi_qm *qm) u32 err_status; err_status = hpre_get_hw_err_status(qm); - if (err_status & qm->err_info.dev_shutdown_mask) + if (err_status & qm->err_info.dev_err.shutdown_mask) return true; return false; } +static void hpre_disable_axi_error(struct hisi_qm *qm) +{ + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; + u32 val; + + val = ~(err_mask & (~HPRE_AXI_ERROR_MASK)); + writel(val, qm->io_base + HPRE_INT_MASK); + + if (qm->ver > QM_HW_V2) + writel(dev_err->shutdown_mask & (~HPRE_AXI_ERROR_MASK), + qm->io_base + HPRE_OOO_SHUTDOWN_SEL); +} + +static void hpre_enable_axi_error(struct hisi_qm *qm) +{ + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; + + /* clear axi error source */ + writel(HPRE_AXI_ERROR_MASK, qm->io_base + HPRE_HAC_SOURCE_INT); + + writel(~err_mask, qm->io_base + HPRE_INT_MASK); + + if (qm->ver > QM_HW_V2) + writel(dev_err->shutdown_mask, qm->io_base + HPRE_OOO_SHUTDOWN_SEL); +} + static void hpre_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; + struct hisi_qm_err_mask *qm_err = &err_info->qm_err; + struct hisi_qm_err_mask *dev_err = &err_info->dev_err; + + qm_err->fe = HPRE_HAC_RAS_FE_ENABLE; + qm_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver); + qm_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver); + qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, + HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + qm_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, + HPRE_QM_RESET_MASK_CAP, qm->cap_ver); + qm_err->ecc_2bits_mask = QM_ECC_MBIT; + + dev_err->fe = HPRE_HAC_RAS_FE_ENABLE; + dev_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); + dev_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, + HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + dev_err->reset_mask = hisi_qm_get_hw_info(qm, 
hpre_basic_info, + HPRE_RESET_MASK_CAP, qm->cap_ver); + dev_err->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR; - err_info->fe = HPRE_HAC_RAS_FE_ENABLE; - err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver); - err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver); - err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR; - err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, - HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); - err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, - HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); - err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, - HPRE_QM_RESET_MASK_CAP, qm->cap_ver); - err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, - HPRE_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = HPRE_WR_MSI_PORT; err_info->acpi_rst = "HRST"; } @@ -1439,6 +1522,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .err_info_init = hpre_err_info_init, .get_err_result = hpre_get_err_result, .dev_is_abnormal = hpre_dev_is_abnormal, + .disable_axi_error = hpre_disable_axi_error, + .enable_axi_error = hpre_enable_axi_error, }; static int hpre_pf_probe_init(struct hpre *hpre) @@ -1450,8 +1535,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) if (ret) return ret; - hpre_open_sva_prefetch(qm); - hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 2e4ee7ecfdfb..a5b96adf2d1e 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -45,6 +45,8 @@ #define QM_SQ_TYPE_MASK GENMASK(3, 0) #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1) +#define QM_SQC_DISABLE_QP (1U << 6) +#define QM_XQC_RANDOM_DATA 0xaaaa /* cqc shift */ #define QM_CQ_HOP_NUM_SHIFT 0 @@ -145,9 +147,9 @@ #define QM_RAS_CE_TIMES_PER_IRQ 1 #define QM_OOO_SHUTDOWN_SEL 0x1040f8 #define QM_AXI_RRESP_ERR BIT(0) -#define QM_ECC_MBIT BIT(2) #define QM_DB_TIMEOUT BIT(10) #define QM_OF_FIFO_OF BIT(11) +#define QM_RAS_AXI_ERROR (BIT(0) | BIT(1) | BIT(12)) #define QM_RESET_WAIT_TIMEOUT 400 #define QM_PEH_VENDOR_ID 0x1000d8 @@ -163,7 +165,6 @@ #define ACC_MASTER_TRANS_RETURN 0x300150 #define ACC_MASTER_GLOBAL_CTRL 0x300000 #define ACC_AM_CFG_PORT_WR_EN 0x30001c -#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT #define ACC_AM_ROB_ECC_INT_STS 0x300104 #define ACC_ROB_ECC_ERR_MULTPL BIT(1) #define QM_MSI_CAP_ENABLE BIT(16) @@ -520,7 +521,7 @@ static bool qm_check_dev_error(struct hisi_qm *qm) return false; err_status = qm_get_hw_error_status(pf_qm); - if (err_status & pf_qm->err_info.qm_shutdown_mask) + if (err_status & pf_qm->err_info.qm_err.shutdown_mask) return true; if (pf_qm->err_ini->dev_is_abnormal) @@ -1395,17 +1396,17 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm) static void qm_hw_error_cfg(struct hisi_qm *qm) { - struct hisi_qm_err_info *err_info = &qm->err_info; + struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err; - qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; + qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe; /* clear QM hw residual error source */ writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); /* configure error type */ - writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); + writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE); writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); - writel(err_info->nfe, qm->io_base + 
QM_RAS_NFE_ENABLE); - writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); + writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE); + writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE); } static void qm_hw_error_init_v2(struct hisi_qm *qm) @@ -1434,7 +1435,7 @@ static void qm_hw_error_init_v3(struct hisi_qm *qm) qm_hw_error_cfg(qm); /* enable close master ooo when hardware error happened */ - writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); + writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); irq_unmask = ~qm->error_mask; irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); @@ -1496,6 +1497,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) { + struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err; u32 error_status; error_status = qm_get_hw_error_status(qm); @@ -1504,17 +1506,16 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) qm->err_status.is_qm_ecc_mbit = true; qm_log_hw_error(qm, error_status); - if (error_status & qm->err_info.qm_reset_mask) { + if (error_status & qm_err->reset_mask) { /* Disable the same error reporting until device is recovered. */ - writel(qm->err_info.nfe & (~error_status), - qm->io_base + QM_RAS_NFE_ENABLE); + writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE); return ACC_ERR_NEED_RESET; } /* Clear error source if not need reset. */ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); - writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE); + writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE); + writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE); } return ACC_ERR_RECOVERED; @@ -2742,6 +2743,27 @@ static void qm_remove_uacce(struct hisi_qm *qm) } } +static void qm_uacce_api_ver_init(struct hisi_qm *qm) +{ + struct uacce_device *uacce = qm->uacce; + + switch (qm->ver) { + case QM_HW_V1: + uacce->api_ver = HISI_QM_API_VER_BASE; + break; + case QM_HW_V2: + uacce->api_ver = HISI_QM_API_VER2_BASE; + break; + case QM_HW_V3: + case QM_HW_V4: + uacce->api_ver = HISI_QM_API_VER3_BASE; + break; + default: + uacce->api_ver = HISI_QM_API_VER5_BASE; + break; + } +} + static int qm_alloc_uacce(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -2776,13 +2798,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm) uacce->priv = qm; if (qm->ver == QM_HW_V1) - uacce->api_ver = HISI_QM_API_VER_BASE; - else if (qm->ver == QM_HW_V2) - uacce->api_ver = HISI_QM_API_VER2_BASE; - else - uacce->api_ver = HISI_QM_API_VER3_BASE; - - if (qm->ver == QM_HW_V1) mmio_page_nr = QM_DOORBELL_PAGE_NR; else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) mmio_page_nr = QM_DOORBELL_PAGE_NR + @@ -2801,6 +2816,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm) uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; qm->uacce = uacce; + qm_uacce_api_ver_init(qm); INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); mutex_init(&qm->isolate_data.isolate_lock); @@ -3179,6 +3195,9 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) qm_init_eq_aeq_status(qm); + /* Before starting the dev, clear the memory and then configure to device using. 
*/ + memset(qm->qdma.va, 0, qm->qdma.size); + ret = qm_eq_ctx_cfg(qm); if (ret) { dev_err(dev, "Set eqc failed!\n"); @@ -3190,9 +3209,13 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) static int __hisi_qm_start(struct hisi_qm *qm) { + struct device *dev = &qm->pdev->dev; int ret; - WARN_ON(!qm->qdma.va); + if (!qm->qdma.va) { + dev_err(dev, "qm qdma is NULL!\n"); + return -EINVAL; + } if (qm->fun_type == QM_HW_PF) { ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); @@ -3266,7 +3289,7 @@ static int qm_restart(struct hisi_qm *qm) for (i = 0; i < qm->qp_num; i++) { qp = &qm->qp_array[i]; if (atomic_read(&qp->qp_status.flags) == QP_STOP && - qp->is_resetting == true) { + qp->is_resetting == true && qp->is_in_kernel == true) { ret = qm_start_qp_nolock(qp, 0); if (ret < 0) { dev_err(dev, "Failed to start qp%d!\n", i); @@ -3298,24 +3321,44 @@ static void qm_stop_started_qp(struct hisi_qm *qm) } /** - * qm_clear_queues() - Clear all queues memory in a qm. - * @qm: The qm in which the queues will be cleared. + * qm_invalid_queues() - invalid all queues in use. + * @qm: The qm in which the queues will be invalidated. * - * This function clears all queues memory in a qm. Reset of accelerator can - * use this to clear queues. + * This function invalid all queues in use. If the doorbell command is sent + * to device in user space after the device is reset, the device discards + * the doorbell command. */ -static void qm_clear_queues(struct hisi_qm *qm) +static void qm_invalid_queues(struct hisi_qm *qm) { struct hisi_qp *qp; + struct qm_sqc *sqc; + struct qm_cqc *cqc; int i; + /* + * Normal stop queues is no longer used and does not need to be + * invalid queues. + */ + if (qm->status.stop_reason == QM_NORMAL) + return; + + if (qm->status.stop_reason == QM_DOWN) + hisi_qm_cache_wb(qm); + for (i = 0; i < qm->qp_num; i++) { qp = &qm->qp_array[i]; - if (qp->is_in_kernel && qp->is_resetting) + if (!qp->is_resetting) + continue; + + /* Modify random data and set sqc close bit to invalid queue. */ + sqc = qm->sqc + i; + cqc = qm->cqc + i; + sqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA); + sqc->w13 = cpu_to_le16(QM_SQC_DISABLE_QP); + cqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA); + if (qp->is_in_kernel) memset(qp->qdma.va, 0, qp->qdma.size); } - - memset(qm->qdma.va, 0, qm->qdma.size); } /** @@ -3372,7 +3415,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) } } - qm_clear_queues(qm); + qm_invalid_queues(qm); qm->status.stop_reason = QM_NORMAL; err_unlock: @@ -3617,19 +3660,19 @@ static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) return 0; } -static int qm_clear_vft_config(struct hisi_qm *qm) +static void qm_clear_vft_config(struct hisi_qm *qm) { - int ret; u32 i; - for (i = 1; i <= qm->vfs_num; i++) { - ret = hisi_qm_set_vft(qm, i, 0, 0); - if (ret) - return ret; - } - qm->vfs_num = 0; + /* + * When disabling SR-IOV, clear the configuration of each VF in the hardware + * sequentially. Failure to clear a single VF should not affect the clearing + * operation of other VFs. 
+ */ + for (i = 1; i <= qm->vfs_num; i++) + (void)hisi_qm_set_vft(qm, i, 0, 0); - return 0; + qm->vfs_num = 0; } static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) @@ -3826,6 +3869,10 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, } pdev = container_of(dev, struct pci_dev, dev); + if (pci_physfn(pdev) != qm->pdev) { + pci_err(qm->pdev, "the pdev input does not match the pf!\n"); + return -EINVAL; + } *fun_index = pdev->devfn; @@ -3960,13 +4007,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) goto err_put_sync; } + qm->vfs_num = num_vfs; ret = pci_enable_sriov(pdev, num_vfs); if (ret) { pci_err(pdev, "Can't enable VF!\n"); qm_clear_vft_config(qm); goto err_put_sync; } - qm->vfs_num = num_vfs; pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); @@ -4001,11 +4048,10 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) } pci_disable_sriov(pdev); - - qm->vfs_num = 0; + qm_clear_vft_config(qm); qm_pm_put_sync(qm); - return qm_clear_vft_config(qm); + return 0; } EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); @@ -4179,9 +4225,9 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) !qm->err_status.is_qm_ecc_mbit && !qm->err_ini->close_axi_master_ooo) { nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); - writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_RAS_NFE_ENABLE); - writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET); } } @@ -4447,9 +4493,6 @@ static void qm_restart_prepare(struct hisi_qm *qm) { u32 value; - if (qm->err_ini->open_sva_prefetch) - qm->err_ini->open_sva_prefetch(qm); - if (qm->ver >= QM_HW_V3) return; @@ -4463,12 +4506,12 @@ static void qm_restart_prepare(struct hisi_qm *qm) qm->io_base + ACC_AM_CFG_PORT_WR_EN); /* clear dev ecc 2bit error source if having */ - value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; + value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask; if (value && qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, value); /* clear QM ecc mbit error source */ - writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); + writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); /* clear AM Reorder Buffer ecc mbit source */ writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); @@ -4495,6 +4538,34 @@ clear_flags: qm->err_status.is_dev_ecc_mbit = false; } +static void qm_disable_axi_error(struct hisi_qm *qm) +{ + struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err; + u32 val; + + val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR)); + writel(val, qm->io_base + QM_ABNORMAL_INT_MASK); + if (qm->ver > QM_HW_V2) + writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR), + qm->io_base + QM_OOO_SHUTDOWN_SEL); + + if (qm->err_ini->disable_axi_error) + qm->err_ini->disable_axi_error(qm); +} + +static void qm_enable_axi_error(struct hisi_qm *qm) +{ + /* clear axi error source */ + writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK); + if (qm->ver > QM_HW_V2) + writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); + + if (qm->err_ini->enable_axi_error) + qm->err_ini->enable_axi_error(qm); +} + static int qm_controller_reset_done(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -4528,6 +4599,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm) 
qm_restart_prepare(qm); hisi_qm_dev_err_init(qm); + qm_disable_axi_error(qm); if (qm->err_ini->open_axi_master_ooo) qm->err_ini->open_axi_master_ooo(qm); @@ -4550,7 +4622,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm) ret = qm_wait_vf_prepare_finish(qm); if (ret) pci_err(pdev, "failed to start by vfs in soft reset!\n"); - + qm_enable_axi_error(qm); qm_cmd_init(qm); qm_restart_done(qm); @@ -4731,6 +4803,15 @@ flr_done: } EXPORT_SYMBOL_GPL(hisi_qm_reset_done); +static irqreturn_t qm_rsvd_irq(int irq, void *data) +{ + struct hisi_qm *qm = data; + + dev_info(&qm->pdev->dev, "Reserved interrupt, ignore!\n"); + + return IRQ_HANDLED; +} + static irqreturn_t qm_abnormal_irq(int irq, void *data) { struct hisi_qm *qm = data; @@ -4760,8 +4841,6 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev) ret = hisi_qm_stop(qm, QM_DOWN); if (ret) dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); - - hisi_qm_cache_wb(qm); } EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); @@ -5014,7 +5093,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; - if (qm->fun_type == QM_HW_VF) + if (qm->fun_type == QM_HW_VF && qm->ver < QM_HW_V3) return; val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; @@ -5031,17 +5110,28 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm) u32 irq_vector, val; int ret; - if (qm->fun_type == QM_HW_VF) - return 0; - val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return 0; - irq_vector = val & QM_IRQ_VECTOR_MASK; + + /* For VF, this is a reserved interrupt in V3 version. */ + if (qm->fun_type == QM_HW_VF) { + if (qm->ver < QM_HW_V3) + return 0; + + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_rsvd_irq, + IRQF_NO_AUTOEN, qm->dev_name, qm); + if (ret) { + dev_err(&pdev->dev, "failed to request reserved irq, ret = %d!\n", ret); + return ret; + } + return 0; + } + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); if (ret) - dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); + dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret); return ret; } @@ -5407,6 +5497,12 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) pci_set_master(pdev); num_vec = qm_get_irq_num(qm); + if (!num_vec) { + dev_err(dev, "Device irq num is zero!\n"); + ret = -EINVAL; + goto err_get_pci_res; + } + num_vec = roundup_pow_of_two(num_vec); ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); if (ret < 0) { dev_err(dev, "Failed to enable MSI vectors!\n"); diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index ef0cb733c92c..129cb6faa0b7 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -922,7 +922,8 @@ static int sec_hw_init(struct sec_dev_info *info) struct iommu_domain *domain; u32 sec_ipv4_mask = 0; u32 sec_ipv6_mask[10] = {}; - u32 i, ret; + int ret; + u32 i; domain = iommu_get_domain_for_dev(info->dev); diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index d044ded0f290..31590d01139a 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -1944,14 +1944,12 @@ static void sec_request_uninit(struct sec_req *req) static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) { struct sec_qp_ctx *qp_ctx; - int i; + int i = 0; - for (i = 0; i < ctx->sec->ctx_q_num; 
i++) { + do { qp_ctx = &ctx->qp_ctx[i]; req->req_id = sec_alloc_req_id(req, qp_ctx); - if (req->req_id >= 0) - break; - } + } while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num); req->qp_ctx = qp_ctx; req->backlog = &qp_ctx->backlog; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 72cf48d1f3ab..5eb2d6820742 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -47,6 +47,8 @@ #define SEC_RAS_FE_ENB_MSK 0x0 #define SEC_OOO_SHUTDOWN_SEL 0x301014 #define SEC_RAS_DISABLE 0x0 +#define SEC_AXI_ERROR_MASK (BIT(0) | BIT(1)) + #define SEC_MEM_START_INIT_REG 0x301100 #define SEC_MEM_INIT_DONE_REG 0x301104 @@ -93,6 +95,16 @@ #define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11))) #define SEC_PREFETCH_DISABLE BIT(1) #define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11)) +#define SEC_SVA_PREFETCH_INFO 0x301ED4 +#define SEC_SVA_STALL_NUM GENMASK(23, 8) +#define SEC_SVA_PREFETCH_NUM GENMASK(2, 0) +#define SEC_WAIT_SVA_READY 500000 +#define SEC_READ_SVA_STATUS_TIMES 3 +#define SEC_WAIT_US_MIN 10 +#define SEC_WAIT_US_MAX 20 +#define SEC_WAIT_QP_US_MIN 1000 +#define SEC_WAIT_QP_US_MAX 2000 +#define SEC_MAX_WAIT_TIMES 2000 #define SEC_DELAY_10_US 10 #define SEC_POLL_TIMEOUT_US 1000 @@ -464,6 +476,81 @@ static void sec_set_endian(struct hisi_qm *qm) writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); } +static int sec_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask) +{ + u32 val, try_times = 0; + u8 count = 0; + + /* + * Read the register value every 10-20us. If the value is 0 for three + * consecutive times, the SVA module is ready. + */ + do { + val = readl(qm->io_base + offset); + if (val & mask) + count = 0; + else if (++count == SEC_READ_SVA_STATUS_TIMES) + break; + + usleep_range(SEC_WAIT_US_MIN, SEC_WAIT_US_MAX); + } while (++try_times < SEC_WAIT_SVA_READY); + + if (try_times == SEC_WAIT_SVA_READY) { + pci_err(qm->pdev, "failed to wait sva prefetch ready\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void sec_close_sva_prefetch(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) + return; + + val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); + val |= SEC_PREFETCH_DISABLE; + writel(val, qm->io_base + SEC_PREFETCH_CFG); + + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, + val, !(val & SEC_SVA_DISABLE_READY), + SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); + if (ret) + pci_err(qm->pdev, "failed to close sva prefetch\n"); + + (void)sec_wait_sva_ready(qm, SEC_SVA_PREFETCH_INFO, SEC_SVA_STALL_NUM); +} + +static void sec_open_sva_prefetch(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) + return; + + /* Enable prefetch */ + val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); + val &= SEC_PREFETCH_ENABLE; + writel(val, qm->io_base + SEC_PREFETCH_CFG); + + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, + val, !(val & SEC_PREFETCH_DISABLE), + SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); + if (ret) { + pci_err(qm->pdev, "failed to open sva prefetch\n"); + sec_close_sva_prefetch(qm); + return; + } + + ret = sec_wait_sva_ready(qm, SEC_SVA_TRANS, SEC_SVA_PREFETCH_NUM); + if (ret) + sec_close_sva_prefetch(qm); +} + static void sec_engine_sva_config(struct hisi_qm *qm) { u32 reg; @@ -497,45 +584,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm) writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); } -} - -static void sec_open_sva_prefetch(struct hisi_qm 
*qm) -{ - u32 val; - int ret; - - if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) - return; - - /* Enable prefetch */ - val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); - val &= SEC_PREFETCH_ENABLE; - writel(val, qm->io_base + SEC_PREFETCH_CFG); - - ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, - val, !(val & SEC_PREFETCH_DISABLE), - SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to open sva prefetch\n"); -} - -static void sec_close_sva_prefetch(struct hisi_qm *qm) -{ - u32 val; - int ret; - - if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) - return; - - val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); - val |= SEC_PREFETCH_DISABLE; - writel(val, qm->io_base + SEC_PREFETCH_CFG); - - ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, - val, !(val & SEC_SVA_DISABLE_READY), - SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to close sva prefetch\n"); + sec_open_sva_prefetch(qm); } static void sec_enable_clock_gate(struct hisi_qm *qm) @@ -666,8 +715,7 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable) val1 = readl(qm->io_base + SEC_CONTROL_REG); if (enable) { val1 |= SEC_AXI_SHUTDOWN_ENABLE; - val2 = hisi_qm_get_hw_info(qm, sec_basic_info, - SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + val2 = qm->err_info.dev_err.shutdown_mask; } else { val1 &= SEC_AXI_SHUTDOWN_DISABLE; val2 = 0x0; @@ -681,7 +729,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable) static void sec_hw_error_enable(struct hisi_qm *qm) { - u32 ce, nfe; + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; if (qm->ver == QM_HW_V1) { writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); @@ -689,22 +738,19 @@ static void sec_hw_error_enable(struct hisi_qm *qm) return; } - ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver); - nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); - /* clear SEC hw error source if having */ - writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE); + writel(err_mask, qm->io_base + SEC_CORE_INT_SOURCE); /* enable RAS int */ - writel(ce, qm->io_base + SEC_RAS_CE_REG); - writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); - writel(nfe, qm->io_base + SEC_RAS_NFE_REG); + writel(dev_err->ce, qm->io_base + SEC_RAS_CE_REG); + writel(dev_err->fe, qm->io_base + SEC_RAS_FE_REG); + writel(dev_err->nfe, qm->io_base + SEC_RAS_NFE_REG); /* enable SEC block master OOO when nfe occurs on Kunpeng930 */ sec_master_ooo_ctrl(qm, true); /* enable SEC hw error interrupts */ - writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK); + writel(err_mask, qm->io_base + SEC_CORE_INT_MASK); } static void sec_hw_error_disable(struct hisi_qm *qm) @@ -1061,12 +1107,20 @@ static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type) { - u32 nfe_mask; + u32 nfe_mask = qm->err_info.dev_err.nfe; - nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG); } +static void sec_enable_error_report(struct hisi_qm *qm) +{ + u32 nfe_mask = qm->err_info.dev_err.nfe; + u32 ce_mask = qm->err_info.dev_err.ce; + + writel(nfe_mask, qm->io_base + SEC_RAS_NFE_REG); + writel(ce_mask, qm->io_base + SEC_RAS_CE_REG); +} + static void sec_open_axi_master_ooo(struct hisi_qm *qm) { u32 val; @@ -1082,16 
+1136,18 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm) err_status = sec_get_hw_err_status(qm); if (err_status) { - if (err_status & qm->err_info.ecc_2bits_mask) + if (err_status & qm->err_info.dev_err.ecc_2bits_mask) qm->err_status.is_dev_ecc_mbit = true; sec_log_hw_error(qm, err_status); - if (err_status & qm->err_info.dev_reset_mask) { + if (err_status & qm->err_info.dev_err.reset_mask) { /* Disable the same error reporting until device is recovered. */ sec_disable_error_report(qm, err_status); return ACC_ERR_NEED_RESET; } sec_clear_hw_err_status(qm, err_status); + /* Avoid firmware disable error report, re-enable. */ + sec_enable_error_report(qm); } return ACC_ERR_RECOVERED; @@ -1102,28 +1158,62 @@ static bool sec_dev_is_abnormal(struct hisi_qm *qm) u32 err_status; err_status = sec_get_hw_err_status(qm); - if (err_status & qm->err_info.dev_shutdown_mask) + if (err_status & qm->err_info.dev_err.shutdown_mask) return true; return false; } +static void sec_disable_axi_error(struct hisi_qm *qm) +{ + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; + + writel(err_mask & ~SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_MASK); + + if (qm->ver > QM_HW_V2) + writel(dev_err->shutdown_mask & (~SEC_AXI_ERROR_MASK), + qm->io_base + SEC_OOO_SHUTDOWN_SEL); +} + +static void sec_enable_axi_error(struct hisi_qm *qm) +{ + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; + + /* clear axi error source */ + writel(SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_SOURCE); + + writel(err_mask, qm->io_base + SEC_CORE_INT_MASK); + + if (qm->ver > QM_HW_V2) + writel(dev_err->shutdown_mask, qm->io_base + SEC_OOO_SHUTDOWN_SEL); +} + static void sec_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; + struct hisi_qm_err_mask *qm_err = &err_info->qm_err; + struct hisi_qm_err_mask *dev_err = &err_info->dev_err; + + qm_err->fe = SEC_RAS_FE_ENB_MSK; + qm_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver); + qm_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver); + qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + qm_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_QM_RESET_MASK_CAP, qm->cap_ver); + qm_err->ecc_2bits_mask = QM_ECC_MBIT; + + dev_err->fe = SEC_RAS_FE_ENB_MSK; + dev_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver); + dev_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); + dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + dev_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_RESET_MASK_CAP, qm->cap_ver); + dev_err->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; - err_info->fe = SEC_RAS_FE_ENB_MSK; - err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver); - err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver); - err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; - err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, - SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); - err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, - SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); - err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, - 
SEC_QM_RESET_MASK_CAP, qm->cap_ver); - err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, - SEC_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = BIT(0); err_info->acpi_rst = "SRST"; } @@ -1141,6 +1231,8 @@ static const struct hisi_qm_err_ini sec_err_ini = { .err_info_init = sec_err_info_init, .get_err_result = sec_get_err_result, .dev_is_abnormal = sec_dev_is_abnormal, + .disable_axi_error = sec_disable_axi_error, + .enable_axi_error = sec_enable_axi_error, }; static int sec_pf_probe_init(struct sec_dev *sec) @@ -1152,7 +1244,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) if (ret) return ret; - sec_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); ret = sec_show_last_regs_init(qm); @@ -1169,7 +1260,7 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm) size_t i, size; size = ARRAY_SIZE(sec_cap_query_info); - sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL); + sec_cap = devm_kcalloc(&pdev->dev, size, sizeof(*sec_cap), GFP_KERNEL); if (!sec_cap) return -ENOMEM; diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c index 6f22e4c36e49..68aebd02fc84 100644 --- a/drivers/crypto/hisilicon/zip/dae_main.c +++ b/drivers/crypto/hisilicon/zip/dae_main.c @@ -15,6 +15,7 @@ #define DAE_REG_RD_TMOUT_US USEC_PER_SEC #define DAE_ALG_NAME "hashagg" +#define DAE_V5_ALG_NAME "hashagg\nudma\nhashjoin\ngather" /* error */ #define DAE_AXI_CFG_OFFSET 0x331000 @@ -82,6 +83,7 @@ int hisi_dae_set_user_domain(struct hisi_qm *qm) int hisi_dae_set_alg(struct hisi_qm *qm) { + const char *alg_name; size_t len; if (!dae_is_support(qm)) @@ -90,9 +92,14 @@ int hisi_dae_set_alg(struct hisi_qm *qm) if (!qm->uacce) return 0; + if (qm->ver >= QM_HW_V5) + alg_name = DAE_V5_ALG_NAME; + else + alg_name = DAE_ALG_NAME; + len = strlen(qm->uacce->algs); /* A line break may be required */ - if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) { + if (len + strlen(alg_name) + 1 >= QM_DEV_ALG_MAX_LEN) { pci_err(qm->pdev, "algorithm name is too long!\n"); return -EINVAL; } @@ -100,7 +107,7 @@ int hisi_dae_set_alg(struct hisi_qm *qm) if (len) strcat((char *)qm->uacce->algs, "\n"); - strcat((char *)qm->uacce->algs, DAE_ALG_NAME); + strcat((char *)qm->uacce->algs, alg_name); return 0; } @@ -168,6 +175,12 @@ static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type) writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET); } +static void hisi_dae_enable_error_report(struct hisi_qm *qm) +{ + writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET); + writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET); +} + static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type) { const struct hisi_dae_hw_error *err = dae_hw_error; @@ -209,6 +222,8 @@ enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm) return ACC_ERR_NEED_RESET; } hisi_dae_clear_hw_err_status(qm, err_status); + /* Avoid firmware disable error report, re-enable. 
*/ + hisi_dae_enable_error_report(qm); return ACC_ERR_RECOVERED; } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index d8ba23b7cc7d..4fcbe6bada06 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -65,6 +65,7 @@ #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24 #define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0) +#define HZIP_AXI_ERROR_MASK (BIT(2) | BIT(3)) #define HZIP_SQE_SIZE 128 #define HZIP_PF_DEF_Q_NUM 64 #define HZIP_PF_DEF_Q_BASE 0 @@ -80,6 +81,7 @@ #define HZIP_ALG_GZIP_BIT GENMASK(3, 2) #define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4) #define HZIP_ALG_LZ77_BIT GENMASK(7, 6) +#define HZIP_ALG_LZ4_BIT GENMASK(9, 8) #define HZIP_BUF_SIZE 22 #define HZIP_SQE_MASK_OFFSET 64 @@ -95,10 +97,16 @@ #define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0))) #define HZIP_SVA_PREFETCH_DISABLE BIT(26) #define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30)) +#define HZIP_SVA_PREFETCH_NUM GENMASK(18, 16) +#define HZIP_SVA_STALL_NUM GENMASK(15, 0) #define HZIP_SHAPER_RATE_COMPRESS 750 #define HZIP_SHAPER_RATE_DECOMPRESS 140 -#define HZIP_DELAY_1_US 1 -#define HZIP_POLL_TIMEOUT_US 1000 +#define HZIP_DELAY_1_US 1 +#define HZIP_POLL_TIMEOUT_US 1000 +#define HZIP_WAIT_SVA_READY 500000 +#define HZIP_READ_SVA_STATUS_TIMES 3 +#define HZIP_WAIT_US_MIN 10 +#define HZIP_WAIT_US_MAX 20 /* clock gating */ #define HZIP_PEH_CFG_AUTO_GATE 0x3011A8 @@ -111,6 +119,9 @@ /* zip comp high performance */ #define HZIP_HIGH_PERF_OFFSET 0x301208 +#define HZIP_LIT_LEN_EN_OFFSET 0x301204 +#define HZIP_LIT_LEN_EN_EN BIT(4) + enum { HZIP_HIGH_COMP_RATE, HZIP_HIGH_COMP_PERF, @@ -141,6 +152,12 @@ static const struct qm_dev_alg zip_dev_algs[] = { { }, { .alg_msk = HZIP_ALG_LZ77_BIT, .alg = "lz77_zstd\n", + }, { + .alg_msk = HZIP_ALG_LZ77_BIT, + .alg = "lz77_only\n", + }, { + .alg_msk = HZIP_ALG_LZ4_BIT, + .alg = "lz4\n", }, }; @@ -448,10 +465,23 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) return false; } -static int hisi_zip_set_high_perf(struct hisi_qm *qm) +static void hisi_zip_literal_set(struct hisi_qm *qm) +{ + u32 val; + + if (qm->ver < QM_HW_V3) + return; + + val = readl_relaxed(qm->io_base + HZIP_LIT_LEN_EN_OFFSET); + val &= ~HZIP_LIT_LEN_EN_EN; + + /* enable literal length in stream mode compression */ + writel(val, qm->io_base + HZIP_LIT_LEN_EN_OFFSET); +} + +static void hisi_zip_set_high_perf(struct hisi_qm *qm) { u32 val; - int ret; val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET); if (perf_mode == HZIP_HIGH_COMP_PERF) @@ -461,16 +491,36 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm) /* Set perf mode */ writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET); - ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET, - val, val == perf_mode, HZIP_DELAY_1_US, - HZIP_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to set perf mode\n"); +} - return ret; +static int hisi_zip_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask) +{ + u32 val, try_times = 0; + u8 count = 0; + + /* + * Read the register value every 10-20us. If the value is 0 for three + * consecutive times, the SVA module is ready. 
+ */ + do { + val = readl(qm->io_base + offset); + if (val & mask) + count = 0; + else if (++count == HZIP_READ_SVA_STATUS_TIMES) + break; + + usleep_range(HZIP_WAIT_US_MIN, HZIP_WAIT_US_MAX); + } while (++try_times < HZIP_WAIT_SVA_READY); + + if (try_times == HZIP_WAIT_SVA_READY) { + pci_err(qm->pdev, "failed to wait sva prefetch ready\n"); + return -ETIMEDOUT; + } + + return 0; } -static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) +static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; @@ -478,19 +528,20 @@ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; - /* Enable prefetch */ val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG); - val &= HZIP_PREFETCH_ENABLE; + val |= HZIP_SVA_PREFETCH_DISABLE; writel(val, qm->io_base + HZIP_PREFETCH_CFG); - ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG, - val, !(val & HZIP_SVA_PREFETCH_DISABLE), + ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS, + val, !(val & HZIP_SVA_DISABLE_READY), HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US); if (ret) - pci_err(qm->pdev, "failed to open sva prefetch\n"); + pci_err(qm->pdev, "failed to close sva prefetch\n"); + + (void)hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_STALL_NUM); } -static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm) +static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; @@ -498,15 +549,23 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm) if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; + /* Enable prefetch */ val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG); - val |= HZIP_SVA_PREFETCH_DISABLE; + val &= HZIP_PREFETCH_ENABLE; writel(val, qm->io_base + HZIP_PREFETCH_CFG); - ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS, - val, !(val & HZIP_SVA_DISABLE_READY), + ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG, + val, !(val & HZIP_SVA_PREFETCH_DISABLE), HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US); + if (ret) { + pci_err(qm->pdev, "failed to open sva prefetch\n"); + hisi_zip_close_sva_prefetch(qm); + return; + } + + ret = hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_PREFETCH_NUM); if (ret) - pci_err(qm->pdev, "failed to close sva prefetch\n"); + hisi_zip_close_sva_prefetch(qm); } static void hisi_zip_enable_clock_gate(struct hisi_qm *qm) @@ -530,6 +589,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) void __iomem *base = qm->io_base; u32 dcomp_bm, comp_bm; u32 zip_core_en; + int ret; /* qm user domain */ writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); @@ -565,6 +625,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63); writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); } + hisi_zip_open_sva_prefetch(qm); /* let's open all compression/decompression cores */ @@ -580,9 +641,19 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); + hisi_zip_set_high_perf(qm); + hisi_zip_literal_set(qm); hisi_zip_enable_clock_gate(qm); - return hisi_dae_set_user_domain(qm); + ret = hisi_dae_set_user_domain(qm); + if (ret) + goto close_sva_prefetch; + + return 0; + +close_sva_prefetch: + hisi_zip_close_sva_prefetch(qm); + return ret; } static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable) @@ -592,8 +663,7 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool 
enable) val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); if (enable) { val1 |= HZIP_AXI_SHUTDOWN_ENABLE; - val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info, - ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + val2 = qm->err_info.dev_err.shutdown_mask; } else { val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE; val2 = 0x0; @@ -607,7 +677,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable) static void hisi_zip_hw_error_enable(struct hisi_qm *qm) { - u32 nfe, ce; + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; if (qm->ver == QM_HW_V1) { writel(HZIP_CORE_INT_MASK_ALL, @@ -616,33 +687,29 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm) return; } - nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); - ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); - /* clear ZIP hw error source if having */ - writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE); + writel(err_mask, qm->io_base + HZIP_CORE_INT_SOURCE); /* configure error type */ - writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); - writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB); - writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(dev_err->ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); + writel(dev_err->fe, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB); + writel(dev_err->nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); hisi_zip_master_ooo_ctrl(qm, true); /* enable ZIP hw error interrupts */ - writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG); + writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG); hisi_dae_hw_error_enable(qm); } static void hisi_zip_hw_error_disable(struct hisi_qm *qm) { - u32 nfe, ce; + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; /* disable ZIP hw error interrupts */ - nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); - ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); - writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG); + writel(err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG); hisi_zip_master_ooo_ctrl(qm, false); @@ -1116,12 +1183,20 @@ static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type) { - u32 nfe_mask; + u32 nfe_mask = qm->err_info.dev_err.nfe; - nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); } +static void hisi_zip_enable_error_report(struct hisi_qm *qm) +{ + u32 nfe_mask = qm->err_info.dev_err.nfe; + u32 ce_mask = qm->err_info.dev_err.ce; + + writel(nfe_mask, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(ce_mask, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); +} + static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) { u32 val; @@ -1160,16 +1235,18 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm) /* Get device hardware new error status */ err_status = hisi_zip_get_hw_err_status(qm); if (err_status) { - if (err_status & qm->err_info.ecc_2bits_mask) + if (err_status & qm->err_info.dev_err.ecc_2bits_mask) qm->err_status.is_dev_ecc_mbit = true; hisi_zip_log_hw_error(qm, err_status); - if (err_status & qm->err_info.dev_reset_mask) { + if (err_status & qm->err_info.dev_err.reset_mask) { /* Disable the same 
error reporting until device is recovered. */ hisi_zip_disable_error_report(qm, err_status); - return ACC_ERR_NEED_RESET; + zip_result = ACC_ERR_NEED_RESET; } else { hisi_zip_clear_hw_err_status(qm, err_status); + /* Avoid firmware disable error report, re-enable. */ + hisi_zip_enable_error_report(qm); } } @@ -1185,7 +1262,7 @@ static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm) u32 err_status; err_status = hisi_zip_get_hw_err_status(qm); - if (err_status & qm->err_info.dev_shutdown_mask) + if (err_status & qm->err_info.dev_err.shutdown_mask) return true; return hisi_dae_dev_is_abnormal(qm); @@ -1196,23 +1273,59 @@ static int hisi_zip_set_priv_status(struct hisi_qm *qm) return hisi_dae_close_axi_master_ooo(qm); } +static void hisi_zip_disable_axi_error(struct hisi_qm *qm) +{ + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; + u32 val; + + val = ~(err_mask & (~HZIP_AXI_ERROR_MASK)); + writel(val, qm->io_base + HZIP_CORE_INT_MASK_REG); + + if (qm->ver > QM_HW_V2) + writel(dev_err->shutdown_mask & (~HZIP_AXI_ERROR_MASK), + qm->io_base + HZIP_OOO_SHUTDOWN_SEL); +} + +static void hisi_zip_enable_axi_error(struct hisi_qm *qm) +{ + struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err; + u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe; + + /* clear axi error source */ + writel(HZIP_AXI_ERROR_MASK, qm->io_base + HZIP_CORE_INT_SOURCE); + + writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG); + + if (qm->ver > QM_HW_V2) + writel(dev_err->shutdown_mask, qm->io_base + HZIP_OOO_SHUTDOWN_SEL); +} + static void hisi_zip_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; + struct hisi_qm_err_mask *qm_err = &err_info->qm_err; + struct hisi_qm_err_mask *dev_err = &err_info->dev_err; + + qm_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK; + qm_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver); + qm_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_QM_NFE_MASK_CAP, qm->cap_ver); + qm_err->ecc_2bits_mask = QM_ECC_MBIT; + qm_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_QM_RESET_MASK_CAP, qm->cap_ver); + qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + + dev_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK; + dev_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); + dev_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); + dev_err->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; + dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + dev_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_RESET_MASK_CAP, qm->cap_ver); - err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK; - err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver); - err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, - ZIP_QM_NFE_MASK_CAP, qm->cap_ver); - err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; - err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, - ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); - err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, - ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); - err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, - ZIP_QM_RESET_MASK_CAP, qm->cap_ver); - err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, - 
ZIP_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = HZIP_WR_PORT; err_info->acpi_rst = "ZRST"; } @@ -1232,6 +1345,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .get_err_result = hisi_zip_get_err_result, .set_priv_status = hisi_zip_set_priv_status, .dev_is_abnormal = hisi_zip_dev_is_abnormal, + .disable_axi_error = hisi_zip_disable_axi_error, + .enable_axi_error = hisi_zip_enable_axi_error, }; static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) @@ -1251,11 +1366,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) if (ret) return ret; - ret = hisi_zip_set_high_perf(qm); - if (ret) - return ret; - - hisi_zip_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); @@ -1273,7 +1383,7 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm) size_t i, size; size = ARRAY_SIZE(zip_cap_query_info); - zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL); + zip_cap = devm_kcalloc(&pdev->dev, size, sizeof(*zip_cap), GFP_KERNEL); if (!zip_cap) return -ENOMEM;
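Across hpre_main.c, sec_main.c and zip_main.c the recurring mechanical change is that the CE/NFE/shutdown/reset capability masks are no longer re-read through hisi_qm_get_hw_info() at every call site; each driver's err_info_init() now caches them once into per-QM and per-device mask structures. The field names below are taken from the uses in this diff, but the struct definitions live in the shared QM header, which this diffstat does not cover, so treat this as an inferred sketch rather than the committed layout:

/* Shape implied by the qm->err_info.qm_err / qm->err_info.dev_err users above. */
struct hisi_qm_err_mask {
	u32 fe;			/* fatal error enable mask */
	u32 ce;			/* correctable error mask */
	u32 nfe;		/* non-fatal error mask */
	u32 shutdown_mask;	/* errors that shut down the AXI master OOO path */
	u32 reset_mask;		/* errors that require a controller reset */
	u32 ecc_2bits_mask;	/* ECC multi-bit error bits */
};

struct hisi_qm_err_info {
	struct hisi_qm_err_mask qm_err;		/* QM-level masks */
	struct hisi_qm_err_mask dev_err;	/* engine-level masks */
	u32 msi_wr_port;
	const char *acpi_rst;
	/* ... */
};

Caching the masks at init time also lets helpers such as sec_enable_error_report() and hpre_enable_axi_error() rebuild the full NFE/CE/shutdown configuration after a recovery without another capability-table lookup.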