diff options
Diffstat (limited to 'drivers/pci/controller')
36 files changed, 2343 insertions, 469 deletions
| diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 0341d51d6aed..ef1cfdae33bb 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -355,6 +355,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {  static const struct j721e_pcie_data j7200_pcie_ep_data = {  	.mode = PCI_MODE_EP,  	.quirk_detect_quiet_flag = true, +	.linkdown_irq_regfield = J7200_LINK_DOWN,  	.quirk_disable_flr = true,  	.max_lanes = 2,  }; @@ -376,13 +377,13 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = {  	.mode = PCI_MODE_RC,  	.quirk_retrain_flag = true,  	.byte_access_allowed = false, -	.linkdown_irq_regfield = LINK_DOWN, +	.linkdown_irq_regfield = J7200_LINK_DOWN,  	.max_lanes = 4,  };  static const struct j721e_pcie_data j784s4_pcie_ep_data = {  	.mode = PCI_MODE_EP, -	.linkdown_irq_regfield = LINK_DOWN, +	.linkdown_irq_regfield = J7200_LINK_DOWN,  	.max_lanes = 4,  }; diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index e0cc4560dfde..599ec4b1223e 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -301,12 +301,12 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,  	val |= interrupts;  	cdns_pcie_ep_fn_writew(pcie, fn, reg, val); -	/* Set MSIX BAR and offset */ +	/* Set MSI-X BAR and offset */  	reg = cap + PCI_MSIX_TABLE;  	val = offset | bir;  	cdns_pcie_ep_fn_writel(pcie, fn, reg, val); -	/* Set PBA BAR and offset.  BAR must match MSIX BAR */ +	/* Set PBA BAR and offset.  
BAR must match MSI-X BAR */  	reg = cap + PCI_MSIX_PBA;  	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;  	cdns_pcie_ep_fn_writel(pcie, fn, reg, val); @@ -352,8 +352,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,  	spin_unlock_irqrestore(&ep->lock, flags);  	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | -		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | -		 CDNS_PCIE_MSG_NO_DATA; +		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code);  	writel(0, ep->irq_cpu_addr + offset);  } @@ -573,8 +572,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)  	/*  	 * Next function field in ARI_CAP_AND_CTR register for last function -	 * should be 0. -	 * Clearing Next Function Number field for the last function used. +	 * should be 0.  Clear Next Function Number field for the last +	 * function used.  	 */  	last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);  	reg     = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn); diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index f5eeff834ec1..39ee9945c903 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -246,7 +246,7 @@ struct cdns_pcie_rp_ib_bar {  #define CDNS_PCIE_NORMAL_MSG_CODE_MASK		GENMASK(15, 8)  #define CDNS_PCIE_NORMAL_MSG_CODE(code) \  	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) -#define CDNS_PCIE_MSG_NO_DATA			BIT(16) +#define CDNS_PCIE_MSG_DATA			BIT(16)  struct cdns_pcie; diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index b6d6778b0698..d9f0386396ed 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -6,6 +6,16 @@ menu "DesignWare-based PCIe controllers"  config PCIE_DW  	bool +config PCIE_DW_DEBUGFS +	bool "DesignWare PCIe debugfs entries" +	depends on DEBUG_FS +	depends on PCIE_DW_HOST || PCIE_DW_EP +	help +	  Say Y here to enable debugfs entries for the PCIe 
controller. These +	  entries provide various debug features related to the controller and +	  expose the RAS DES capabilities such as Silicon Debug, Error Injection +	  and Statistical Counters. +  config PCIE_DW_HOST  	bool  	select PCIE_DW @@ -27,6 +37,17 @@ config PCIE_AL  	  required only for DT-based platforms. ACPI platforms with the  	  Annapurna Labs PCIe controller don't need to enable this. +config PCIE_AMD_MDB +	bool "AMD MDB Versal2 PCIe controller" +	depends on OF && (ARM64 || COMPILE_TEST) +	depends on PCI_MSI +	select PCIE_DW_HOST +	help +	  Say Y here if you want to enable PCIe controller support on AMD +	  Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on +	  DesignWare IP and therefore the driver re-uses the DesignWare +	  core functions to implement the driver. +  config PCI_MESON  	tristate "Amlogic Meson PCIe controller"  	default m if ARCH_MESON diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index a8308d9ea986..908cb7f345db 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -1,8 +1,10 @@  # SPDX-License-Identifier: GPL-2.0  obj-$(CONFIG_PCIE_DW) += pcie-designware.o +obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o  obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o  obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o  obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o +obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o  obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o  obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o  obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 90ace941090f..5f267dd261b5 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -41,7 +41,6 @@  #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)  #define IMX8MQ_GPR_PCIE_VREG_BYPASS		BIT(12)  #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8) -#define 
IMX8MQ_PCIE2_BASE_ADDR			0x33c00000  #define IMX95_PCIE_PHY_GEN_CTRL			0x0  #define IMX95_PCIE_REF_USE_PAD			BIT(17) @@ -109,7 +108,6 @@ enum imx_pcie_variants {  #define imx_check_flag(pci, val)	(pci->drvdata->flags & val) -#define IMX_PCIE_MAX_CLKS	6  #define IMX_PCIE_MAX_INSTANCES	2  struct imx_pcie; @@ -120,9 +118,6 @@ struct imx_pcie_drvdata {  	u32 flags;  	int dbi_length;  	const char *gpr; -	const char * const *clk_names; -	const u32 clks_cnt; -	const u32 clks_optional_cnt;  	const u32 ltssm_off;  	const u32 ltssm_mask;  	const u32 mode_off[IMX_PCIE_MAX_INSTANCES]; @@ -137,7 +132,8 @@ struct imx_pcie_drvdata {  struct imx_pcie {  	struct dw_pcie		*pci;  	struct gpio_desc	*reset_gpiod; -	struct clk_bulk_data	clks[IMX_PCIE_MAX_CLKS]; +	struct clk_bulk_data	*clks; +	int			num_clks;  	struct regmap		*iomuxc_gpr;  	u16			msi_ctrl;  	u32			controller_id; @@ -470,13 +466,14 @@ static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)  	int mult, div;  	u16 val;  	int i; +	struct clk_bulk_data *clks = imx_pcie->clks;  	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))  		return 0; -	for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) -		if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0) -			phy_rate = clk_get_rate(imx_pcie->clks[i].clk); +	for (i = 0; i < imx_pcie->num_clks; i++) +		if (strncmp(clks[i].id, "pcie_phy", 8) == 0) +			phy_rate = clk_get_rate(clks[i].clk);  	switch (phy_rate) {  	case 125000000: @@ -668,7 +665,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)  	struct device *dev = pci->dev;  	int ret; -	ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); +	ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);  	if (ret)  		return ret; @@ -685,7 +682,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)  	return 0;  err_ref_clk: -	clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); +	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);  	return ret;  } @@ 
-694,7 +691,7 @@ static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)  {  	if (imx_pcie->drvdata->enable_ref_clk)  		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); -	clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); +	clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);  }  static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) @@ -1217,22 +1214,6 @@ static void imx_pcie_host_exit(struct dw_pcie_rp *pp)  		regulator_disable(imx_pcie->vpcie);  } -static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr) -{ -	struct imx_pcie *imx_pcie = to_imx_pcie(pcie); -	struct dw_pcie_rp *pp = &pcie->pp; -	struct resource_entry *entry; - -	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP)) -		return cpu_addr; - -	entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); -	if (!entry) -		return cpu_addr; - -	return cpu_addr - entry->offset; -} -  /*   * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2   * register is reserved, so the generic DWC implementation of sending the @@ -1263,7 +1244,6 @@ static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {  static const struct dw_pcie_ops dw_pcie_ops = {  	.start_link = imx_pcie_start_link,  	.stop_link = imx_pcie_stop_link, -	.cpu_addr_fixup = imx_pcie_cpu_addr_fixup,  };  static void imx_pcie_ep_init(struct dw_pcie_ep *ep) @@ -1474,9 +1454,8 @@ static int imx_pcie_probe(struct platform_device *pdev)  	struct dw_pcie *pci;  	struct imx_pcie *imx_pcie;  	struct device_node *np; -	struct resource *dbi_base;  	struct device_node *node = dev->of_node; -	int i, ret, req_cnt; +	int ret, domain;  	u16 val;  	imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL); @@ -1515,10 +1494,6 @@ static int imx_pcie_probe(struct platform_device *pdev)  			return PTR_ERR(imx_pcie->phy_base);  	} -	pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); -	if (IS_ERR(pci->dbi_base)) -		return 
PTR_ERR(pci->dbi_base); -  	/* Fetch GPIOs */  	imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);  	if (IS_ERR(imx_pcie->reset_gpiod)) @@ -1526,20 +1501,11 @@ static int imx_pcie_probe(struct platform_device *pdev)  				     "unable to get reset gpio\n");  	gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); -	if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS) -		return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); - -	for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) -		imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i]; -  	/* Fetch clocks */ -	req_cnt = imx_pcie->drvdata->clks_cnt - imx_pcie->drvdata->clks_optional_cnt; -	ret = devm_clk_bulk_get(dev, req_cnt, imx_pcie->clks); -	if (ret) -		return ret; -	imx_pcie->clks[req_cnt].clk = devm_clk_get_optional(dev, "ref"); -	if (IS_ERR(imx_pcie->clks[req_cnt].clk)) -		return PTR_ERR(imx_pcie->clks[req_cnt].clk); +	imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks); +	if (imx_pcie->num_clks < 0) +		return dev_err_probe(dev, imx_pcie->num_clks, +				     "failed to get clocks\n");  	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {  		imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); @@ -1565,8 +1531,11 @@ static int imx_pcie_probe(struct platform_device *pdev)  	switch (imx_pcie->drvdata->variant) {  	case IMX8MQ:  	case IMX8MQ_EP: -		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) -			imx_pcie->controller_id = 1; +		domain = of_get_pci_domain_nr(node); +		if (domain < 0 || domain > 1) +			return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n"); + +		imx_pcie->controller_id = domain;  		break;  	default:  		break; @@ -1645,6 +1614,7 @@ static int imx_pcie_probe(struct platform_device *pdev)  	if (ret)  		return ret; +	pci->use_parent_dt_ranges = true;  	if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {  		ret = imx_add_pcie_ep(imx_pcie, pdev);  		if (ret < 0) @@ -1675,13 +1645,6 @@ static void imx_pcie_shutdown(struct 
platform_device *pdev)  	imx_pcie_assert_core_reset(imx_pcie);  } -static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"}; -static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"}; -static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"}; -static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"}; -static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"}; -static const char * const imx95_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux", "ref"}; -  static const struct imx_pcie_drvdata drvdata[] = {  	[IMX6Q] = {  		.variant = IMX6Q, @@ -1691,8 +1654,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,  		.dbi_length = 0x200,  		.gpr = "fsl,imx6q-iomuxc-gpr", -		.clk_names = imx6q_clks, -		.clks_cnt = ARRAY_SIZE(imx6q_clks),  		.ltssm_off = IOMUXC_GPR12,  		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,  		.mode_off[0] = IOMUXC_GPR12, @@ -1707,8 +1668,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE |  			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,  		.gpr = "fsl,imx6q-iomuxc-gpr", -		.clk_names = imx6sx_clks, -		.clks_cnt = ARRAY_SIZE(imx6sx_clks),  		.ltssm_off = IOMUXC_GPR12,  		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,  		.mode_off[0] = IOMUXC_GPR12, @@ -1725,8 +1684,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,  		.dbi_length = 0x200,  		.gpr = "fsl,imx6q-iomuxc-gpr", -		.clk_names = imx6q_clks, -		.clks_cnt = ARRAY_SIZE(imx6q_clks),  		.ltssm_off = IOMUXC_GPR12,  		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,  		.mode_off[0] = IOMUXC_GPR12, @@ -1742,8 +1699,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_HAS_APP_RESET |  			 IMX_PCIE_FLAG_HAS_PHY_RESET,  		.gpr = "fsl,imx7d-iomuxc-gpr", -		.clk_names = imx6q_clks, -		.clks_cnt = ARRAY_SIZE(imx6q_clks),  		.mode_off[0] = IOMUXC_GPR12,  		.mode_mask[0] = 
IMX6Q_GPR12_DEVICE_TYPE,  		.enable_ref_clk = imx7d_pcie_enable_ref_clk, @@ -1755,8 +1710,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_HAS_PHY_RESET |  			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,  		.gpr = "fsl,imx8mq-iomuxc-gpr", -		.clk_names = imx8mq_clks, -		.clks_cnt = ARRAY_SIZE(imx8mq_clks),  		.mode_off[0] = IOMUXC_GPR12,  		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,  		.mode_off[1] = IOMUXC_GPR12, @@ -1770,8 +1723,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_HAS_PHYDRV |  			 IMX_PCIE_FLAG_HAS_APP_RESET,  		.gpr = "fsl,imx8mm-iomuxc-gpr", -		.clk_names = imx8mm_clks, -		.clks_cnt = ARRAY_SIZE(imx8mm_clks),  		.mode_off[0] = IOMUXC_GPR12,  		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,  		.enable_ref_clk = imx8mm_pcie_enable_ref_clk, @@ -1782,8 +1733,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_HAS_PHYDRV |  			 IMX_PCIE_FLAG_HAS_APP_RESET,  		.gpr = "fsl,imx8mp-iomuxc-gpr", -		.clk_names = imx8mm_clks, -		.clks_cnt = ARRAY_SIZE(imx8mm_clks),  		.mode_off[0] = IOMUXC_GPR12,  		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,  		.enable_ref_clk = imx8mm_pcie_enable_ref_clk, @@ -1793,17 +1742,12 @@ static const struct imx_pcie_drvdata drvdata[] = {  		.flags = IMX_PCIE_FLAG_HAS_PHYDRV |  			 IMX_PCIE_FLAG_CPU_ADDR_FIXUP |  			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND, -		.clk_names = imx8q_clks, -		.clks_cnt = ARRAY_SIZE(imx8q_clks),  	},  	[IMX95] = {  		.variant = IMX95,  		.flags = IMX_PCIE_FLAG_HAS_SERDES |  			 IMX_PCIE_FLAG_HAS_LUT |  			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND, -		.clk_names = imx95_clks, -		.clks_cnt = ARRAY_SIZE(imx95_clks), -		.clks_optional_cnt = 1,  		.ltssm_off = IMX95_PE0_GEN_CTRL_3,  		.ltssm_mask = IMX95_PCIE_LTSSM_EN,  		.mode_off[0]  = IMX95_PE0_GEN_CTRL_1, @@ -1816,8 +1760,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_HAS_PHY_RESET,  		.mode = DW_PCIE_EP_TYPE,  		.gpr = "fsl,imx8mq-iomuxc-gpr", -		.clk_names = imx8mq_clks, -		.clks_cnt = 
ARRAY_SIZE(imx8mq_clks),  		.mode_off[0] = IOMUXC_GPR12,  		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,  		.mode_off[1] = IOMUXC_GPR12, @@ -1832,8 +1774,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_HAS_PHYDRV,  		.mode = DW_PCIE_EP_TYPE,  		.gpr = "fsl,imx8mm-iomuxc-gpr", -		.clk_names = imx8mm_clks, -		.clks_cnt = ARRAY_SIZE(imx8mm_clks),  		.mode_off[0] = IOMUXC_GPR12,  		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,  		.epc_features = &imx8m_pcie_epc_features, @@ -1845,8 +1785,6 @@ static const struct imx_pcie_drvdata drvdata[] = {  			 IMX_PCIE_FLAG_HAS_PHYDRV,  		.mode = DW_PCIE_EP_TYPE,  		.gpr = "fsl,imx8mp-iomuxc-gpr", -		.clk_names = imx8mm_clks, -		.clks_cnt = ARRAY_SIZE(imx8mm_clks),  		.mode_off[0] = IOMUXC_GPR12,  		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,  		.epc_features = &imx8m_pcie_epc_features, @@ -1857,15 +1795,11 @@ static const struct imx_pcie_drvdata drvdata[] = {  		.flags = IMX_PCIE_FLAG_HAS_PHYDRV,  		.mode = DW_PCIE_EP_TYPE,  		.epc_features = &imx8q_pcie_epc_features, -		.clk_names = imx8q_clks, -		.clks_cnt = ARRAY_SIZE(imx8q_clks),  	},  	[IMX95_EP] = {  		.variant = IMX95_EP,  		.flags = IMX_PCIE_FLAG_HAS_SERDES |  			 IMX_PCIE_FLAG_SUPPORT_64BIT, -		.clk_names = imx8mq_clks, -		.clks_cnt = ARRAY_SIZE(imx8mq_clks),  		.ltssm_off = IMX95_PE0_GEN_CTRL_3,  		.ltssm_mask = IMX95_PCIE_LTSSM_EN,  		.mode_off[0]  = IMX95_PE0_GEN_CTRL_1, diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 63bd5003da45..76a37368ae4f 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -966,11 +966,11 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {  	.msix_capable = true,  	.bar[BAR_0] = { .type = BAR_RESERVED, },  	.bar[BAR_1] = { .type = BAR_RESERVED, }, -	.bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_2] = { .type = BAR_RESIZABLE, },  	.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = 
SZ_64K, },  	.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, }, -	.bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.align = SZ_1M, +	.bar[BAR_5] = { .type = BAR_RESIZABLE, }, +	.align = SZ_64K,  };  static const struct pci_epc_features* diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 239a05b36e8e..a44b5c256d6e 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -356,7 +356,7 @@ static int ls_pcie_probe(struct platform_device *pdev)  	if (pcie->drvdata->scfg_support) {  		pcie->scfg =  			syscon_regmap_lookup_by_phandle_args(dev->of_node, -							     "fsl,pcie-scfg", 2, +							     "fsl,pcie-scfg", 1,  							     index);  		if (IS_ERR(pcie->scfg)) {  			dev_err(dev, "No syscfg phandle specified\n"); diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c new file mode 100644 index 000000000000..4eb2a4e8189d --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for AMD MDB PCIe Bridge + * + * Copyright (C) 2024-2025, Advanced Micro Devices, Inc. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +#define AMD_MDB_TLP_IR_STATUS_MISC		0x4C0 +#define AMD_MDB_TLP_IR_MASK_MISC		0x4C4 +#define AMD_MDB_TLP_IR_ENABLE_MISC		0x4C8 +#define AMD_MDB_TLP_IR_DISABLE_MISC		0x4CC + +#define AMD_MDB_TLP_PCIE_INTX_MASK	GENMASK(23, 16) + +#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x)	BIT((x) * 2) + +/* Interrupt registers definitions. 
*/ +#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT		15 +#define AMD_MDB_PCIE_INTR_INTX			16 +#define AMD_MDB_PCIE_INTR_PM_PME_RCVD		24 +#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD	25 +#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE	26 +#define AMD_MDB_PCIE_INTR_NONFATAL		27 +#define AMD_MDB_PCIE_INTR_FATAL			28 + +#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x) +#define AMD_MDB_PCIE_IMR_ALL_MASK			\ +	(						\ +		IMR(CMPL_TIMEOUT)	|		\ +		IMR(PM_PME_RCVD)	|		\ +		IMR(PME_TO_ACK_RCVD)	|		\ +		IMR(MISC_CORRECTABLE)	|		\ +		IMR(NONFATAL)		|		\ +		IMR(FATAL)		|		\ +		AMD_MDB_TLP_PCIE_INTX_MASK		\ +	) + +/** + * struct amd_mdb_pcie - PCIe port information + * @pci: DesignWare PCIe controller structure + * @slcr: MDB System Level Control and Status Register (SLCR) base + * @intx_domain: INTx IRQ domain pointer + * @mdb_domain: MDB IRQ domain pointer + * @intx_irq: INTx IRQ interrupt number + */ +struct amd_mdb_pcie { +	struct dw_pcie			pci; +	void __iomem			*slcr; +	struct irq_domain		*intx_domain; +	struct irq_domain		*mdb_domain; +	int				intx_irq; +}; + +static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = { +}; + +static void amd_mdb_intx_irq_mask(struct irq_data *data) +{ +	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data); +	struct dw_pcie *pci = &pcie->pci; +	struct dw_pcie_rp *port = &pci->pp; +	unsigned long flags; +	u32 val; + +	raw_spin_lock_irqsave(&port->lock, flags); +	val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK, +			 AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq)); + +	/* +	 * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that +	 * interrupt, writing '0' has no effect. 
+	 */ +	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); +	raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void amd_mdb_intx_irq_unmask(struct irq_data *data) +{ +	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data); +	struct dw_pcie *pci = &pcie->pci; +	struct dw_pcie_rp *port = &pci->pp; +	unsigned long flags; +	u32 val; + +	raw_spin_lock_irqsave(&port->lock, flags); +	val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK, +			 AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq)); + +	/* +	 * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that +	 * interrupt, writing '0' has no effect. +	 */ +	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); +	raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip amd_mdb_intx_irq_chip = { +	.name		= "AMD MDB INTx", +	.irq_mask	= amd_mdb_intx_irq_mask, +	.irq_unmask	= amd_mdb_intx_irq_unmask, +}; + +/** + * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid + * @domain: IRQ domain + * @irq: Virtual IRQ number + * @hwirq: Hardware interrupt number + * + * Return: Always returns '0'. + */ +static int amd_mdb_pcie_intx_map(struct irq_domain *domain, +				 unsigned int irq, irq_hw_number_t hwirq) +{ +	irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip, +				 handle_level_irq); +	irq_set_chip_data(irq, domain->host_data); +	irq_set_status_flags(irq, IRQ_LEVEL); + +	return 0; +} + +/* INTx IRQ domain operations. 
*/ +static const struct irq_domain_ops amd_intx_domain_ops = { +	.map = amd_mdb_pcie_intx_map, +}; + +static irqreturn_t dw_pcie_rp_intx(int irq, void *args) +{ +	struct amd_mdb_pcie *pcie = args; +	unsigned long val; +	int i, int_status; + +	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); +	int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val); + +	for (i = 0; i < PCI_NUM_INTX; i++) { +		if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i)) +			generic_handle_domain_irq(pcie->intx_domain, i); +	} + +	return IRQ_HANDLED; +} + +#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s } + +static const struct { +	const char	*sym; +	const char	*str; +} intr_cause[32] = { +	_IC(CMPL_TIMEOUT,	"Completion timeout"), +	_IC(PM_PME_RCVD,	"PM_PME message received"), +	_IC(PME_TO_ACK_RCVD,	"PME_TO_ACK message received"), +	_IC(MISC_CORRECTABLE,	"Correctable error message"), +	_IC(NONFATAL,		"Non fatal error message"), +	_IC(FATAL,		"Fatal error message"), +}; + +static void amd_mdb_event_irq_mask(struct irq_data *d) +{ +	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d); +	struct dw_pcie *pci = &pcie->pci; +	struct dw_pcie_rp *port = &pci->pp; +	unsigned long flags; +	u32 val; + +	raw_spin_lock_irqsave(&port->lock, flags); +	val = BIT(d->hwirq); +	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); +	raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void amd_mdb_event_irq_unmask(struct irq_data *d) +{ +	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d); +	struct dw_pcie *pci = &pcie->pci; +	struct dw_pcie_rp *port = &pci->pp; +	unsigned long flags; +	u32 val; + +	raw_spin_lock_irqsave(&port->lock, flags); +	val = BIT(d->hwirq); +	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); +	raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip amd_mdb_event_irq_chip = { +	.name		= "AMD MDB RC-Event", +	.irq_mask	= amd_mdb_event_irq_mask, +	.irq_unmask	= amd_mdb_event_irq_unmask, +}; + +static 
int amd_mdb_pcie_event_map(struct irq_domain *domain, +				  unsigned int irq, irq_hw_number_t hwirq) +{ +	irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip, +				 handle_level_irq); +	irq_set_chip_data(irq, domain->host_data); +	irq_set_status_flags(irq, IRQ_LEVEL); + +	return 0; +} + +static const struct irq_domain_ops event_domain_ops = { +	.map = amd_mdb_pcie_event_map, +}; + +static irqreturn_t amd_mdb_pcie_event(int irq, void *args) +{ +	struct amd_mdb_pcie *pcie = args; +	unsigned long val; +	int i; + +	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); +	val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC); +	for_each_set_bit(i, &val, 32) +		generic_handle_domain_irq(pcie->mdb_domain, i); +	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + +	return IRQ_HANDLED; +} + +static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie) +{ +	if (pcie->intx_domain) { +		irq_domain_remove(pcie->intx_domain); +		pcie->intx_domain = NULL; +	} + +	if (pcie->mdb_domain) { +		irq_domain_remove(pcie->mdb_domain); +		pcie->mdb_domain = NULL; +	} +} + +static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie) +{ +	unsigned long val; + +	/* Disable all TLP interrupts. */ +	writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK, +		       pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); + +	/* Clear pending TLP interrupts. */ +	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); +	val &= AMD_MDB_PCIE_IMR_ALL_MASK; +	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + +	/* Enable all TLP interrupts. */ +	writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK, +		       pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); + +	return 0; +} + +/** + * amd_mdb_pcie_init_irq_domains - Initialize IRQ domain + * @pcie: PCIe port information + * @pdev: Platform device + * + * Return: Returns '0' on success and error value on failure. 
+ */ +static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie, +					 struct platform_device *pdev) +{ +	struct dw_pcie *pci = &pcie->pci; +	struct dw_pcie_rp *pp = &pci->pp; +	struct device *dev = &pdev->dev; +	struct device_node *node = dev->of_node; +	struct device_node *pcie_intc_node; +	int err; + +	pcie_intc_node = of_get_next_child(node, NULL); +	if (!pcie_intc_node) { +		dev_err(dev, "No PCIe Intc node found\n"); +		return -ENODEV; +	} + +	pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32, +						 &event_domain_ops, pcie); +	if (!pcie->mdb_domain) { +		err = -ENOMEM; +		dev_err(dev, "Failed to add MDB domain\n"); +		goto out; +	} + +	irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS); + +	pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, +						  &amd_intx_domain_ops, pcie); +	if (!pcie->intx_domain) { +		err = -ENOMEM; +		dev_err(dev, "Failed to add INTx domain\n"); +		goto mdb_out; +	} + +	of_node_put(pcie_intc_node); +	irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED); + +	raw_spin_lock_init(&pp->lock); + +	return 0; +mdb_out: +	amd_mdb_pcie_free_irq_domains(pcie); +out: +	of_node_put(pcie_intc_node); +	return err; +} + +static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args) +{ +	struct amd_mdb_pcie *pcie = args; +	struct device *dev; +	struct irq_data *d; + +	dev = pcie->pci.dev; + +	/* +	 * In the future, error reporting will be hooked to the AER subsystem. +	 * Currently, the driver prints a warning message to the user. 
+	 */ +	d = irq_domain_get_irq_data(pcie->mdb_domain, irq); +	if (intr_cause[d->hwirq].str) +		dev_warn(dev, "%s\n", intr_cause[d->hwirq].str); +	else +		dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq); + +	return IRQ_HANDLED; +} + +static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie, +			     struct platform_device *pdev) +{ +	struct dw_pcie *pci = &pcie->pci; +	struct dw_pcie_rp *pp = &pci->pp; +	struct device *dev = &pdev->dev; +	int i, irq, err; + +	amd_mdb_pcie_init_port(pcie); + +	pp->irq = platform_get_irq(pdev, 0); +	if (pp->irq < 0) +		return pp->irq; + +	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) { +		if (!intr_cause[i].str) +			continue; + +		irq = irq_create_mapping(pcie->mdb_domain, i); +		if (!irq) { +			dev_err(dev, "Failed to map MDB domain interrupt\n"); +			return -ENOMEM; +		} + +		err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler, +				       IRQF_NO_THREAD, intr_cause[i].sym, pcie); +		if (err) { +			dev_err(dev, "Failed to request IRQ %d, err=%d\n", +				irq, err); +			return err; +		} +	} + +	pcie->intx_irq = irq_create_mapping(pcie->mdb_domain, +					    AMD_MDB_PCIE_INTR_INTX); +	if (!pcie->intx_irq) { +		dev_err(dev, "Failed to map INTx interrupt\n"); +		return -ENXIO; +	} + +	err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx, +			       IRQF_NO_THREAD, NULL, pcie); +	if (err) { +		dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n", +			irq, err); +		return err; +	} + +	/* Plug the main event handler. 
*/ +	err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD, +			       "amd_mdb pcie_irq", pcie); +	if (err) { +		dev_err(dev, "Failed to request event IRQ %d, err=%d\n", +			pp->irq, err); +		return err; +	} + +	return 0; +} + +static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie, +				 struct platform_device *pdev) +{ +	struct dw_pcie *pci = &pcie->pci; +	struct dw_pcie_rp *pp = &pci->pp; +	struct device *dev = &pdev->dev; +	int err; + +	pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr"); +	if (IS_ERR(pcie->slcr)) +		return PTR_ERR(pcie->slcr); + +	err = amd_mdb_pcie_init_irq_domains(pcie, pdev); +	if (err) +		return err; + +	err = amd_mdb_setup_irq(pcie, pdev); +	if (err) { +		dev_err(dev, "Failed to set up interrupts, err=%d\n", err); +		goto out; +	} + +	pp->ops = &amd_mdb_pcie_host_ops; + +	err = dw_pcie_host_init(pp); +	if (err) { +		dev_err(dev, "Failed to initialize host, err=%d\n", err); +		goto out; +	} + +	return 0; + +out: +	amd_mdb_pcie_free_irq_domains(pcie); +	return err; +} + +static int amd_mdb_pcie_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct amd_mdb_pcie *pcie; +	struct dw_pcie *pci; + +	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); +	if (!pcie) +		return -ENOMEM; + +	pci = &pcie->pci; +	pci->dev = dev; + +	platform_set_drvdata(pdev, pcie); + +	return amd_mdb_add_pcie_port(pcie, pdev); +} + +static const struct of_device_id amd_mdb_pcie_of_match[] = { +	{ +		.compatible = "amd,versal2-mdb-host", +	}, +	{}, +}; + +static struct platform_driver amd_mdb_pcie_driver = { +	.driver = { +		.name	= "amd-mdb-pcie", +		.of_match_table = amd_mdb_pcie_of_match, +		.suppress_bind_attrs = true, +	}, +	.probe = amd_mdb_pcie_probe, +}; + +builtin_platform_driver(amd_mdb_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c new file mode 100644 index 000000000000..9e6f4d00f262 --- /dev/null +++ 
b/drivers/pci/controller/dwc/pcie-designware-debugfs.c @@ -0,0 +1,677 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe controller debugfs driver + * + * Copyright (C) 2025 Samsung Electronics Co., Ltd. + *		 http://www.samsung.com + * + * Author: Shradha Todi <shradha.t@samsung.com> + */ + +#include <linux/debugfs.h> + +#include "pcie-designware.h" + +#define SD_STATUS_L1LANE_REG		0xb0 +#define PIPE_RXVALID			BIT(18) +#define PIPE_DETECT_LANE		BIT(17) +#define LANE_SELECT			GENMASK(3, 0) + +#define ERR_INJ0_OFF			0x34 +#define EINJ_VAL_DIFF			GENMASK(28, 16) +#define EINJ_VC_NUM			GENMASK(14, 12) +#define EINJ_TYPE_SHIFT			8 +#define EINJ0_TYPE			GENMASK(11, 8) +#define EINJ1_TYPE			BIT(8) +#define EINJ2_TYPE			GENMASK(9, 8) +#define EINJ3_TYPE			GENMASK(10, 8) +#define EINJ4_TYPE			GENMASK(10, 8) +#define EINJ5_TYPE			BIT(8) +#define EINJ_COUNT			GENMASK(7, 0) + +#define ERR_INJ_ENABLE_REG		0x30 + +#define RAS_DES_EVENT_COUNTER_DATA_REG	0xc + +#define RAS_DES_EVENT_COUNTER_CTRL_REG	0x8 +#define EVENT_COUNTER_GROUP_SELECT	GENMASK(27, 24) +#define EVENT_COUNTER_EVENT_SELECT	GENMASK(23, 16) +#define EVENT_COUNTER_LANE_SELECT	GENMASK(11, 8) +#define EVENT_COUNTER_STATUS		BIT(7) +#define EVENT_COUNTER_ENABLE		GENMASK(4, 2) +#define PER_EVENT_ON			0x3 +#define PER_EVENT_OFF			0x1 + +#define DWC_DEBUGFS_BUF_MAX		128 + +/** + * struct dwc_pcie_rasdes_info - Stores controller common information + * @ras_cap_offset: RAS DES vendor specific extended capability offset + * @reg_event_lock: Mutex used for RAS DES shadow event registers + * + * Any parameter constant to all files of the debugfs hierarchy for a single + * controller will be stored in this struct. It is allocated and assigned to + * controller specific struct dw_pcie during initialization. 
+ */ +struct dwc_pcie_rasdes_info { +	u32 ras_cap_offset; +	struct mutex reg_event_lock; +}; + +/** + * struct dwc_pcie_rasdes_priv - Stores file specific private data information + * @pci: Reference to the dw_pcie structure + * @idx: Index of specific file related information in array of structs + * + * All debugfs files will have this struct as its private data. + */ +struct dwc_pcie_rasdes_priv { +	struct dw_pcie *pci; +	int idx; +}; + +/** + * struct dwc_pcie_err_inj - Store details about each error injection + *			     supported by DWC RAS DES + * @name: Name of the error that can be injected + * @err_inj_group: Group number to which the error belongs. The value + *		   can range from 0 to 5 + * @err_inj_type: Each group can have multiple types of error + */ +struct dwc_pcie_err_inj { +	const char *name; +	u32 err_inj_group; +	u32 err_inj_type; +}; + +static const struct dwc_pcie_err_inj err_inj_list[] = { +	{"tx_lcrc", 0x0, 0x0}, +	{"b16_crc_dllp", 0x0, 0x1}, +	{"b16_crc_upd_fc", 0x0, 0x2}, +	{"tx_ecrc", 0x0, 0x3}, +	{"fcrc_tlp", 0x0, 0x4}, +	{"parity_tsos", 0x0, 0x5}, +	{"parity_skpos", 0x0, 0x6}, +	{"rx_lcrc", 0x0, 0x8}, +	{"rx_ecrc", 0x0, 0xb}, +	{"tlp_err_seq", 0x1, 0x0}, +	{"ack_nak_dllp_seq", 0x1, 0x1}, +	{"ack_nak_dllp", 0x2, 0x0}, +	{"upd_fc_dllp", 0x2, 0x1}, +	{"nak_dllp", 0x2, 0x2}, +	{"inv_sync_hdr_sym", 0x3, 0x0}, +	{"com_pad_ts1", 0x3, 0x1}, +	{"com_pad_ts2", 0x3, 0x2}, +	{"com_fts", 0x3, 0x3}, +	{"com_idl", 0x3, 0x4}, +	{"end_edb", 0x3, 0x5}, +	{"stp_sdp", 0x3, 0x6}, +	{"com_skp", 0x3, 0x7}, +	{"posted_tlp_hdr", 0x4, 0x0}, +	{"non_post_tlp_hdr", 0x4, 0x1}, +	{"cmpl_tlp_hdr", 0x4, 0x2}, +	{"posted_tlp_data", 0x4, 0x4}, +	{"non_post_tlp_data", 0x4, 0x5}, +	{"cmpl_tlp_data", 0x4, 0x6}, +	{"duplicate_tlp", 0x5, 0x0}, +	{"nullified_tlp", 0x5, 0x1}, +}; + +static const u32 err_inj_type_mask[] = { +	EINJ0_TYPE, +	EINJ1_TYPE, +	EINJ2_TYPE, +	EINJ3_TYPE, +	EINJ4_TYPE, +	EINJ5_TYPE, +}; + +/** + * struct dwc_pcie_event_counter - Store details about each 
event counter + *				   supported in DWC RAS DES + * @name: Name of the error counter + * @group_no: Group number that the event belongs to. The value can range + *	      from 0 to 4 + * @event_no: Event number of the particular event. The value ranges are: + *		Group 0: 0 - 10 + *		Group 1: 5 - 13 + *		Group 2: 0 - 7 + *		Group 3: 0 - 5 + *		Group 4: 0 - 1 + */ +struct dwc_pcie_event_counter { +	const char *name; +	u32 group_no; +	u32 event_no; +}; + +static const struct dwc_pcie_event_counter event_list[] = { +	{"ebuf_overflow", 0x0, 0x0}, +	{"ebuf_underrun", 0x0, 0x1}, +	{"decode_err", 0x0, 0x2}, +	{"running_disparity_err", 0x0, 0x3}, +	{"skp_os_parity_err", 0x0, 0x4}, +	{"sync_header_err", 0x0, 0x5}, +	{"rx_valid_deassertion", 0x0, 0x6}, +	{"ctl_skp_os_parity_err", 0x0, 0x7}, +	{"retimer_parity_err_1st", 0x0, 0x8}, +	{"retimer_parity_err_2nd", 0x0, 0x9}, +	{"margin_crc_parity_err", 0x0, 0xA}, +	{"detect_ei_infer", 0x1, 0x5}, +	{"receiver_err", 0x1, 0x6}, +	{"rx_recovery_req", 0x1, 0x7}, +	{"n_fts_timeout", 0x1, 0x8}, +	{"framing_err", 0x1, 0x9}, +	{"deskew_err", 0x1, 0xa}, +	{"framing_err_in_l0", 0x1, 0xc}, +	{"deskew_uncompleted_err", 0x1, 0xd}, +	{"bad_tlp", 0x2, 0x0}, +	{"lcrc_err", 0x2, 0x1}, +	{"bad_dllp", 0x2, 0x2}, +	{"replay_num_rollover", 0x2, 0x3}, +	{"replay_timeout", 0x2, 0x4}, +	{"rx_nak_dllp", 0x2, 0x5}, +	{"tx_nak_dllp", 0x2, 0x6}, +	{"retry_tlp", 0x2, 0x7}, +	{"fc_timeout", 0x3, 0x0}, +	{"poisoned_tlp", 0x3, 0x1}, +	{"ecrc_error", 0x3, 0x2}, +	{"unsupported_request", 0x3, 0x3}, +	{"completer_abort", 0x3, 0x4}, +	{"completion_timeout", 0x3, 0x5}, +	{"ebuf_skp_add", 0x4, 0x0}, +	{"ebuf_skp_del", 0x4, 0x1}, +}; + +static ssize_t lane_detect_read(struct file *file, char __user *buf, +				size_t count, loff_t *ppos) +{ +	struct dw_pcie *pci = file->private_data; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; +	ssize_t pos; +	u32 val; + +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + 
SD_STATUS_L1LANE_REG); +	val = FIELD_GET(PIPE_DETECT_LANE, val); +	if (val) +		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n"); +	else +		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n"); + +	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t lane_detect_write(struct file *file, const char __user *buf, +				 size_t count, loff_t *ppos) +{ +	struct dw_pcie *pci = file->private_data; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	u32 lane, val; + +	val = kstrtou32_from_user(buf, count, 0, &lane); +	if (val) +		return val; + +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG); +	val &= ~(LANE_SELECT); +	val |= FIELD_PREP(LANE_SELECT, lane); +	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val); + +	return count; +} + +static ssize_t rx_valid_read(struct file *file, char __user *buf, +			     size_t count, loff_t *ppos) +{ +	struct dw_pcie *pci = file->private_data; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; +	ssize_t pos; +	u32 val; + +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG); +	val = FIELD_GET(PIPE_RXVALID, val); +	if (val) +		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n"); +	else +		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n"); + +	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t rx_valid_write(struct file *file, const char __user *buf, +			      size_t count, loff_t *ppos) +{ +	return lane_detect_write(file, buf, count, ppos); +} + +static ssize_t err_inj_write(struct file *file, const char __user *buf, +			     size_t count, loff_t *ppos) +{ +	struct dwc_pcie_rasdes_priv *pdata = file->private_data; +	struct dw_pcie *pci = pdata->pci; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	u32 val, counter, vc_num, err_group, 
type_mask; +	int val_diff = 0; +	char *kern_buf; + +	err_group = err_inj_list[pdata->idx].err_inj_group; +	type_mask = err_inj_type_mask[err_group]; + +	kern_buf = memdup_user_nul(buf, count); +	if (IS_ERR(kern_buf)) +		return PTR_ERR(kern_buf); + +	if (err_group == 4) { +		val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num); +		if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) { +			kfree(kern_buf); +			return -EINVAL; +		} +	} else if (err_group == 1) { +		val = sscanf(kern_buf, "%u %d", &counter, &val_diff); +		if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) { +			kfree(kern_buf); +			return -EINVAL; +		} +	} else { +		val = kstrtou32(kern_buf, 0, &counter); +		if (val) { +			kfree(kern_buf); +			return val; +		} +	} + +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group)); +	val &= ~(type_mask | EINJ_COUNT); +	val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask); +	val |= FIELD_PREP(EINJ_COUNT, counter); + +	if (err_group == 1 || err_group == 4) { +		val &= ~(EINJ_VAL_DIFF); +		val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff); +	} +	if (err_group == 4) { +		val &= ~(EINJ_VC_NUM); +		val |= FIELD_PREP(EINJ_VC_NUM, vc_num); +	} + +	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val); +	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group)); + +	kfree(kern_buf); +	return count; +} + +static void set_event_number(struct dwc_pcie_rasdes_priv *pdata, +			     struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo) +{ +	u32 val; + +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); +	val &= ~EVENT_COUNTER_ENABLE; +	val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT); +	val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no); +	val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no); +	dw_pcie_writel_dbi(pci, 
rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); +} + +static ssize_t counter_enable_read(struct file *file, char __user *buf, +				   size_t count, loff_t *ppos) +{ +	struct dwc_pcie_rasdes_priv *pdata = file->private_data; +	struct dw_pcie *pci = pdata->pci; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; +	ssize_t pos; +	u32 val; + +	mutex_lock(&rinfo->reg_event_lock); +	set_event_number(pdata, pci, rinfo); +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); +	mutex_unlock(&rinfo->reg_event_lock); +	val = FIELD_GET(EVENT_COUNTER_STATUS, val); +	if (val) +		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n"); +	else +		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n"); + +	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t counter_enable_write(struct file *file, const char __user *buf, +				    size_t count, loff_t *ppos) +{ +	struct dwc_pcie_rasdes_priv *pdata = file->private_data; +	struct dw_pcie *pci = pdata->pci; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	u32 val, enable; + +	val = kstrtou32_from_user(buf, count, 0, &enable); +	if (val) +		return val; + +	mutex_lock(&rinfo->reg_event_lock); +	set_event_number(pdata, pci, rinfo); +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); +	if (enable) +		val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON); +	else +		val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF); + +	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); + +	/* +	 * While enabling the counter, always read the status back to check if +	 * it is enabled or not. Return error if it is not enabled to let the +	 * users know that the counter is not supported on the platform. 
+	 */ +	if (enable) { +		val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + +					RAS_DES_EVENT_COUNTER_CTRL_REG); +		if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) { +			mutex_unlock(&rinfo->reg_event_lock); +			return -EOPNOTSUPP; +		} +	} + +	mutex_unlock(&rinfo->reg_event_lock); + +	return count; +} + +static ssize_t counter_lane_read(struct file *file, char __user *buf, +				 size_t count, loff_t *ppos) +{ +	struct dwc_pcie_rasdes_priv *pdata = file->private_data; +	struct dw_pcie *pci = pdata->pci; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; +	ssize_t pos; +	u32 val; + +	mutex_lock(&rinfo->reg_event_lock); +	set_event_number(pdata, pci, rinfo); +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); +	mutex_unlock(&rinfo->reg_event_lock); +	val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val); +	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val); + +	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t counter_lane_write(struct file *file, const char __user *buf, +				  size_t count, loff_t *ppos) +{ +	struct dwc_pcie_rasdes_priv *pdata = file->private_data; +	struct dw_pcie *pci = pdata->pci; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	u32 val, lane; + +	val = kstrtou32_from_user(buf, count, 0, &lane); +	if (val) +		return val; + +	mutex_lock(&rinfo->reg_event_lock); +	set_event_number(pdata, pci, rinfo); +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); +	val &= ~(EVENT_COUNTER_LANE_SELECT); +	val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane); +	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); +	mutex_unlock(&rinfo->reg_event_lock); + +	return count; +} + +static ssize_t counter_value_read(struct file *file, char __user *buf, +				  size_t count, loff_t *ppos) +{ +	struct dwc_pcie_rasdes_priv *pdata = 
file->private_data; +	struct dw_pcie *pci = pdata->pci; +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; +	char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; +	ssize_t pos; +	u32 val; + +	mutex_lock(&rinfo->reg_event_lock); +	set_event_number(pdata, pci, rinfo); +	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG); +	mutex_unlock(&rinfo->reg_event_lock); +	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val); + +	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm) +{ +	const char *str; + +	switch (ltssm) { +#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED); +	
DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2); +	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3); +	default: +		str = "DW_PCIE_LTSSM_UNKNOWN"; +		break; +	} + +	return str + strlen("DW_PCIE_LTSSM_"); +} + +static int ltssm_status_show(struct seq_file *s, void *v) +{ +	struct dw_pcie *pci = s->private; +	enum dw_pcie_ltssm val; + +	val = dw_pcie_get_ltssm(pci); +	seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val); + +	return 0; +} + +static int ltssm_status_open(struct inode *inode, struct file *file) +{ +	return single_open(file, ltssm_status_show, inode->i_private); +} + +#define dwc_debugfs_create(name)			\ +debugfs_create_file(#name, 0644, rasdes_debug, pci,	\ +			&dbg_ ## name ## _fops) + +#define DWC_DEBUGFS_FOPS(name)					\ +static const struct file_operations dbg_ ## name ## _fops = {	\ +	.open = simple_open,				\ +	.read = name ## _read,				\ +	.write = name ## _write				\ +} + +DWC_DEBUGFS_FOPS(lane_detect); +DWC_DEBUGFS_FOPS(rx_valid); + +static const struct file_operations dwc_pcie_err_inj_ops = { +	.open = simple_open, +	.write = err_inj_write, +}; + +static const struct file_operations dwc_pcie_counter_enable_ops = { +	.open = simple_open, +	.read = counter_enable_read, +	.write = counter_enable_write, +}; + +static const struct file_operations dwc_pcie_counter_lane_ops = { +	.open = simple_open, +	.read = counter_lane_read, +	.write = counter_lane_write, +}; + +static const struct file_operations dwc_pcie_counter_value_ops = { +	.open = simple_open, +	.read = counter_value_read, +}; + +static const struct file_operations dwc_pcie_ltssm_status_ops = { +	.open = 
ltssm_status_open, +	.read = seq_read, +}; + +static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci) +{ +	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + +	mutex_destroy(&rinfo->reg_event_lock); +} + +static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir) +{ +	struct dentry *rasdes_debug, *rasdes_err_inj; +	struct dentry *rasdes_event_counter, *rasdes_events; +	struct dwc_pcie_rasdes_info *rasdes_info; +	struct dwc_pcie_rasdes_priv *priv_tmp; +	struct device *dev = pci->dev; +	int ras_cap, i, ret; + +	/* +	 * If a given SoC has no RAS DES capability, the following call is +	 * bound to return an error, breaking some existing platforms. So, +	 * return 0 here, as this is not necessarily an error. +	 */ +	ras_cap = dw_pcie_find_rasdes_capability(pci); +	if (!ras_cap) { +		dev_dbg(dev, "no RAS DES capability available\n"); +		return 0; +	} + +	rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL); +	if (!rasdes_info) +		return -ENOMEM; + +	/* Create subdirectories for Debug, Error Injection, Statistics. */ +	rasdes_debug = debugfs_create_dir("rasdes_debug", dir); +	rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir); +	rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir); + +	mutex_init(&rasdes_info->reg_event_lock); +	rasdes_info->ras_cap_offset = ras_cap; +	pci->debugfs->rasdes_info = rasdes_info; + +	/* Create debugfs files for Debug subdirectory. */ +	dwc_debugfs_create(lane_detect); +	dwc_debugfs_create(rx_valid); + +	/* Create debugfs files for Error Injection subdirectory. 
*/ +	for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) { +		priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL); +		if (!priv_tmp) { +			ret = -ENOMEM; +			goto err_deinit; +		} + +		priv_tmp->idx = i; +		priv_tmp->pci = pci; +		debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp, +				    &dwc_pcie_err_inj_ops); +	} + +	/* Create debugfs files for Statistical Counter subdirectory. */ +	for (i = 0; i < ARRAY_SIZE(event_list); i++) { +		priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL); +		if (!priv_tmp) { +			ret = -ENOMEM; +			goto err_deinit; +		} + +		priv_tmp->idx = i; +		priv_tmp->pci = pci; +		rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter); +		if (event_list[i].group_no == 0 || event_list[i].group_no == 4) { +			debugfs_create_file("lane_select", 0644, rasdes_events, +					    priv_tmp, &dwc_pcie_counter_lane_ops); +		} +		debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp, +				    &dwc_pcie_counter_value_ops); +		debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp, +				    &dwc_pcie_counter_enable_ops); +	} + +	return 0; + +err_deinit: +	dwc_pcie_rasdes_debugfs_deinit(pci); +	return ret; +} + +static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir) +{ +	debugfs_create_file("ltssm_status", 0444, dir, pci, +			    &dwc_pcie_ltssm_status_ops); +} + +void dwc_pcie_debugfs_deinit(struct dw_pcie *pci) +{ +	if (!pci->debugfs) +		return; + +	dwc_pcie_rasdes_debugfs_deinit(pci); +	debugfs_remove_recursive(pci->debugfs->debug_dir); +} + +void dwc_pcie_debugfs_init(struct dw_pcie *pci) +{ +	char dirname[DWC_DEBUGFS_BUF_MAX]; +	struct device *dev = pci->dev; +	struct debugfs_info *debugfs; +	struct dentry *dir; +	int err; + +	/* Create main directory for each platform driver. 
*/ +	snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev)); +	dir = debugfs_create_dir(dirname, NULL); +	debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL); +	if (!debugfs) +		return; + +	debugfs->debug_dir = dir; +	pci->debugfs = debugfs; +	err = dwc_pcie_rasdes_debugfs_init(pci, dir); +	if (err) +		dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n", +			err); + +	dwc_pcie_ltssm_debugfs_init(pci, dir); +} diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 8e07d432e74f..1a0bf9341542 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -102,6 +102,45 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)  	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);  } +/** + * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list + * @pci: DWC PCI device + * @prev_cap: Capability preceding the capability that should be hidden + * @cap: Capability that should be hidden + * + * Return: 0 if success, errno otherwise. + */ +int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap) +{ +	u16 prev_cap_offset, cap_offset; +	u32 prev_cap_header, cap_header; + +	prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap); +	if (!prev_cap_offset) +		return -EINVAL; + +	prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset); +	cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header); +	cap_header = dw_pcie_readl_dbi(pci, cap_offset); + +	/* cap must immediately follow prev_cap. */ +	if (PCI_EXT_CAP_ID(cap_header) != cap) +		return -EINVAL; + +	/* Clear next ptr. */ +	prev_cap_header &= ~GENMASK(31, 20); + +	/* Set next ptr to next ptr of cap. 
*/ +	prev_cap_header |= cap_header & GENMASK(31, 20); + +	dw_pcie_dbi_ro_wr_en(pci); +	dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header); +	dw_pcie_dbi_ro_wr_dis(pci); + +	return 0; +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability); +  static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  				   struct pci_epf_header *hdr)  { @@ -128,7 +167,7 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  }  static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, -				  dma_addr_t cpu_addr, enum pci_barno bar, +				  dma_addr_t parent_bus_addr, enum pci_barno bar,  				  size_t size)  {  	int ret; @@ -146,7 +185,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,  	}  	ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type, -					  cpu_addr, bar, size); +					  parent_bus_addr, bar, size);  	if (ret < 0) {  		dev_err(pci->dev, "Failed to program IB window\n");  		return ret; @@ -181,7 +220,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,  		return ret;  	set_bit(free_win, ep->ob_window_map); -	ep->outbound_addr[free_win] = atu->cpu_addr; +	ep->outbound_addr[free_win] = atu->parent_bus_addr;  	return 0;  } @@ -205,6 +244,125 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	ep->bar_to_atu[bar] = 0;  } +static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci, +						enum pci_barno bar) +{ +	u32 reg, bar_index; +	unsigned int offset, nbars; +	int i; + +	offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); +	if (!offset) +		return offset; + +	reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); +	nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + +	for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { +		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); +		bar_index = reg & PCI_REBAR_CTRL_BAR_IDX; +		if (bar_index == bar) +			return offset; +	} + 
+	return 0; +} + +static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no, +					struct pci_epf_bar *epf_bar) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	enum pci_barno bar = epf_bar->barno; +	size_t size = epf_bar->size; +	int flags = epf_bar->flags; +	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); +	unsigned int rebar_offset; +	u32 rebar_cap, rebar_ctrl; +	int ret; + +	rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar); +	if (!rebar_offset) +		return -EINVAL; + +	ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap); +	if (ret) +		return ret; + +	dw_pcie_dbi_ro_wr_en(pci); + +	/* +	 * A BAR mask should not be written for a resizable BAR. The BAR mask +	 * is automatically derived by the controller every time the "selected +	 * size" bits are updated, see "Figure 3-26 Resizable BAR Example for +	 * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write +	 * BIT(0) to set the BAR enable bit. +	 */ +	dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0)); +	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); + +	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { +		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0); +		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); +	} + +	/* +	 * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes +	 * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes" +	 * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB. +	 */ +	rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL); +	rebar_ctrl &= ~GENMASK(31, 16); +	dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl); + +	/* +	 * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically +	 * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR +	 * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a. 
+	 */ +	dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap); + +	dw_pcie_dbi_ro_wr_dis(pci); + +	return 0; +} + +static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no, +					   struct pci_epf_bar *epf_bar) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	enum pci_barno bar = epf_bar->barno; +	size_t size = epf_bar->size; +	int flags = epf_bar->flags; +	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); + +	dw_pcie_dbi_ro_wr_en(pci); + +	dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); +	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); + +	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { +		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); +		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); +	} + +	dw_pcie_dbi_ro_wr_dis(pci); + +	return 0; +} + +static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep, +						     enum pci_barno bar) +{ +	const struct pci_epc_features *epc_features; + +	if (!ep->ops->get_features) +		return BAR_PROGRAMMABLE; + +	epc_features = ep->ops->get_features(ep); + +	return epc_features->bar[bar].type; +} +  static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  			      struct pci_epf_bar *epf_bar)  { @@ -212,9 +370,9 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	enum pci_barno bar = epf_bar->barno;  	size_t size = epf_bar->size; +	enum pci_epc_bar_type bar_type;  	int flags = epf_bar->flags;  	int ret, type; -	u32 reg;  	/*  	 * DWC does not allow BAR pairs to overlap, e.g. 
you cannot combine BARs @@ -246,19 +404,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  		goto config_atu;  	} -	reg = PCI_BASE_ADDRESS_0 + (4 * bar); - -	dw_pcie_dbi_ro_wr_en(pci); - -	dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); -	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); - -	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { -		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); -		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); +	bar_type = dw_pcie_ep_get_bar_type(ep, bar); +	switch (bar_type) { +	case BAR_FIXED: +		/* +		 * There is no need to write a BAR mask for a fixed BAR (except +		 * to write 1 to the LSB of the BAR mask register, to enable the +		 * BAR). Write the BAR mask regardless. (The fixed bits in the +		 * BAR mask register will be read-only anyway.) +		 */ +		fallthrough; +	case BAR_PROGRAMMABLE: +		ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar); +		break; +	case BAR_RESIZABLE: +		ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar); +		break; +	default: +		ret = -EINVAL; +		dev_err(pci->dev, "Invalid BAR type\n"); +		break;  	} -	dw_pcie_dbi_ro_wr_dis(pci); +	if (ret) +		return ret;  config_atu:  	if (!(flags & PCI_BASE_ADDRESS_SPACE)) @@ -282,7 +451,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,  	u32 index;  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); -	for (index = 0; index < pci->num_ob_windows; index++) { +	for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {  		if (ep->outbound_addr[index] != addr)  			continue;  		*atu_index = index; @@ -314,7 +483,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); -	ret = dw_pcie_find_index(ep, addr, &atu_index); +	ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset, +				 &atu_index);  	if (ret < 0)  		return; @@ -333,7 +503,7 @@ 
static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	atu.func_no = func_no;  	atu.type = PCIE_ATU_TYPE_MEM; -	atu.cpu_addr = addr; +	atu.parent_bus_addr = addr - pci->parent_bus_offset;  	atu.pci_addr = pci_addr;  	atu.size = size;  	ret = dw_pcie_ep_outbound_atu(ep, &atu); @@ -666,6 +836,7 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	dwc_pcie_debugfs_deinit(pci);  	dw_pcie_edma_remove(pci);  }  EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup); @@ -690,31 +861,15 @@ void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)  }  EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit); -static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) -{ -	u32 header; -	int pos = PCI_CFG_SPACE_SIZE; - -	while (pos) { -		header = dw_pcie_readl_dbi(pci, pos); -		if (PCI_EXT_CAP_ID(header) == cap) -			return pos; - -		pos = PCI_EXT_CAP_NEXT(header); -		if (!pos) -			break; -	} - -	return 0; -} -  static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)  { +	struct dw_pcie_ep *ep = &pci->ep;  	unsigned int offset;  	unsigned int nbars; -	u32 reg, i; +	enum pci_barno bar; +	u32 reg, i, val; -	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); +	offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);  	dw_pcie_dbi_ro_wr_en(pci); @@ -727,9 +882,29 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)  		 * PCIe r6.0, sec 7.8.6.2 require us to support at least one  		 * size in the range from 1 MB to 512 GB. Advertise support  		 * for 1 MB BAR size only. +		 * +		 * For a BAR that has been configured via dw_pcie_ep_set_bar(), +		 * advertise support for only that size instead.  		 
*/ -		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) -			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); +		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { +			/* +			 * While the RESBAR_CAP_REG_* fields are sticky, the +			 * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is +			 * sticky in certain versions of DWC PCIe, but not all). +			 * +			 * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by +			 * the controller when RESBAR_CAP_REG is written, which +			 * is why RESBAR_CAP_REG is written here. +			 */ +			val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); +			bar = val & PCI_REBAR_CTRL_BAR_IDX; +			if (ep->epf_bar[bar]) +				pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val); +			else +				val = BIT(4); + +			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val); +		}  	}  	dw_pcie_setup(pci); @@ -773,6 +948,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)  	if (ret)  		return ret; +	ret = -ENOMEM;  	if (!ep->ib_window_map) {  		ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,  						       GFP_KERNEL); @@ -817,7 +993,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)  	if (ep->ops->init)  		ep->ops->init(ep); -	ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); +	ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);  	/*  	 * PTM responder capability can be disabled only after disabling @@ -837,6 +1013,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)  	dw_pcie_ep_init_non_sticky_registers(pci); +	dwc_pcie_debugfs_init(pci); +  	return 0;  err_remove_edma: @@ -883,26 +1061,15 @@ void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)  }  EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown); -/** - * dw_pcie_ep_init - Initialize the endpoint device - * @ep: DWC EP device - * - * Initialize the endpoint device. Allocate resources and create the EPC - * device with the endpoint framework. - * - * Return: 0 if success, errno otherwise. 
- */ -int dw_pcie_ep_init(struct dw_pcie_ep *ep) +static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)  { -	int ret; -	struct resource *res; -	struct pci_epc *epc;  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	struct device *dev = pci->dev;  	struct platform_device *pdev = to_platform_device(dev);  	struct device_node *np = dev->of_node; - -	INIT_LIST_HEAD(&ep->func_list); +	struct pci_epc *epc = ep->epc; +	struct resource *res; +	int ret;  	ret = dw_pcie_get_resources(pci);  	if (ret) @@ -915,8 +1082,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)  	ep->phys_base = res->start;  	ep->addr_size = resource_size(res); -	if (ep->ops->pre_init) -		ep->ops->pre_init(ep); +	/* +	 * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call +	 * dw_pcie_parent_bus_offset() after setting ep->phys_base. +	 */ +	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space", +							   ep->phys_base); + +	ret = of_property_read_u8(np, "max-functions", &epc->max_functions); +	if (ret < 0) +		epc->max_functions = 1; + +	return 0; +} + +/** + * dw_pcie_ep_init - Initialize the endpoint device + * @ep: DWC EP device + * + * Initialize the endpoint device. Allocate resources and create the EPC + * device with the endpoint framework. + * + * Return: 0 if success, errno otherwise. 
+ */ +int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ +	int ret; +	struct pci_epc *epc; +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct device *dev = pci->dev; + +	INIT_LIST_HEAD(&ep->func_list);  	epc = devm_pci_epc_create(dev, &epc_ops);  	if (IS_ERR(epc)) { @@ -927,9 +1123,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)  	ep->epc = epc;  	epc_set_drvdata(epc, ep); -	ret = of_property_read_u8(np, "max-functions", &epc->max_functions); -	if (ret < 0) -		epc->max_functions = 1; +	ret = dw_pcie_ep_get_resources(ep); +	if (ret) +		return ret; + +	if (ep->ops->pre_init) +		ep->ops->pre_init(ep);  	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,  			       ep->page_size); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index ffaded8f2df7..ecc33f6789e3 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -418,19 +418,15 @@ static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)  	}  } -int dw_pcie_host_init(struct dw_pcie_rp *pp) +static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct device *dev = pci->dev; -	struct device_node *np = dev->of_node;  	struct platform_device *pdev = to_platform_device(dev);  	struct resource_entry *win; -	struct pci_host_bridge *bridge;  	struct resource *res;  	int ret; -	raw_spin_lock_init(&pp->lock); -  	ret = dw_pcie_get_resources(pci);  	if (ret)  		return ret; @@ -448,20 +444,43 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)  	if (IS_ERR(pp->va_cfg0_base))  		return PTR_ERR(pp->va_cfg0_base); -	bridge = devm_pci_alloc_host_bridge(dev, 0); -	if (!bridge) -		return -ENOMEM; - -	pp->bridge = bridge; -  	/* Get the I/O range from DT */ -	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO); +	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);  	if (win) {  		pp->io_size = 
resource_size(win->res);  		pp->io_bus_addr = win->res->start - win->offset;  		pp->io_base = pci_pio_to_address(win->res->start);  	} +	/* +	 * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to +	 * call dw_pcie_parent_bus_offset() after setting pp->io_base. +	 */ +	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config", +							   pp->cfg0_base); +	return 0; +} + +int dw_pcie_host_init(struct dw_pcie_rp *pp) +{ +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct device *dev = pci->dev; +	struct device_node *np = dev->of_node; +	struct pci_host_bridge *bridge; +	int ret; + +	raw_spin_lock_init(&pp->lock); + +	bridge = devm_pci_alloc_host_bridge(dev, 0); +	if (!bridge) +		return -ENOMEM; + +	pp->bridge = bridge; + +	ret = dw_pcie_host_get_resources(pp); +	if (ret) +		return ret; +  	/* Set default bus ops */  	bridge->ops = &dw_pcie_ops;  	bridge->child_ops = &dw_child_pcie_ops; @@ -548,6 +567,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)  	if (pp->ops->post_init)  		pp->ops->post_init(pp); +	dwc_pcie_debugfs_init(pci); +  	return 0;  err_stop_link: @@ -572,6 +593,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	dwc_pcie_debugfs_deinit(pci); +  	pci_stop_root_bus(pp->bridge->bus);  	pci_remove_root_bus(pp->bridge->bus); @@ -616,7 +639,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,  		type = PCIE_ATU_TYPE_CFG1;  	atu.type = type; -	atu.cpu_addr = pp->cfg0_base; +	atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;  	atu.pci_addr = busdev;  	atu.size = pp->cfg0_size; @@ -641,7 +664,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,  	if (pp->cfg0_io_shared) {  		atu.type = PCIE_ATU_TYPE_IO; -		atu.cpu_addr = pp->io_base; +		atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;  		atu.pci_addr = pp->io_bus_addr;  		atu.size = pp->io_size; @@ -667,7 +690,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, 
unsigned int devfn,  	if (pp->cfg0_io_shared) {  		atu.type = PCIE_ATU_TYPE_IO; -		atu.cpu_addr = pp->io_base; +		atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;  		atu.pci_addr = pp->io_bus_addr;  		atu.size = pp->io_size; @@ -736,7 +759,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)  		atu.index = i;  		atu.type = PCIE_ATU_TYPE_MEM; -		atu.cpu_addr = entry->res->start; +		atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;  		atu.pci_addr = entry->res->start - entry->offset;  		/* Adjust iATU size if MSG TLP region was allocated before */ @@ -758,7 +781,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)  		if (pci->num_ob_windows > ++i) {  			atu.index = i;  			atu.type = PCIE_ATU_TYPE_IO; -			atu.cpu_addr = pp->io_base; +			atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;  			atu.pci_addr = pp->io_bus_addr;  			atu.size = pp->io_size; @@ -902,13 +925,13 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci)  	atu.size = resource_size(pci->pp.msg_res);  	atu.index = pci->pp.msg_atu_index; -	atu.cpu_addr = pci->pp.msg_res->start; +	atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;  	ret = dw_pcie_prog_outbound_atu(pci, &atu);  	if (ret)  		return ret; -	mem = ioremap(atu.cpu_addr, pci->region_align); +	mem = ioremap(pci->pp.msg_res->start, pci->region_align);  	if (!mem)  		return -ENOMEM; diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 145e7f579072..97d76d3dc066 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -16,6 +16,8 @@  #include <linux/gpio/consumer.h>  #include <linux/ioport.h>  #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/pcie-dwc.h>  #include <linux/platform_device.h>  #include <linux/sizes.h>  #include <linux/types.h> @@ -283,6 +285,51 @@ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)  }  
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability); +static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id, +					  u16 vsec_id) +{ +	u16 vsec = 0; +	u32 header; + +	if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID)) +		return 0; + +	while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec, +						       PCI_EXT_CAP_ID_VNDR))) { +		header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER); +		if (PCI_VNDR_HEADER_ID(header) == vsec_id) +			return vsec; +	} + +	return 0; +} + +static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci, +					const struct dwc_pcie_vsec_id *vsec_ids) +{ +	const struct dwc_pcie_vsec_id *vid; +	u16 vsec; +	u32 header; + +	for (vid = vsec_ids; vid->vendor_id; vid++) { +		vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id, +						      vid->vsec_id); +		if (vsec) { +			header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER); +			if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev) +				return vsec; +		} +	} + +	return 0; +} + +u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci) +{ +	return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids); +} +EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability); +  int dw_pcie_read(void __iomem *addr, int size, u32 *val)  {  	if (!IS_ALIGNED((uintptr_t)addr, size)) { @@ -470,25 +517,22 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)  int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,  			      const struct dw_pcie_ob_atu_cfg *atu)  { -	u64 cpu_addr = atu->cpu_addr; +	u64 parent_bus_addr = atu->parent_bus_addr;  	u32 retries, val;  	u64 limit_addr; -	if (pci->ops && pci->ops->cpu_addr_fixup) -		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); - -	limit_addr = cpu_addr + atu->size - 1; +	limit_addr = parent_bus_addr + atu->size - 1; -	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) || -	    !IS_ALIGNED(cpu_addr, pci->region_align) || +	if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) || +	    
!IS_ALIGNED(parent_bus_addr, pci->region_align) ||  	    !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {  		return -EINVAL;  	}  	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE, -			      lower_32_bits(cpu_addr)); +			      lower_32_bits(parent_bus_addr));  	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE, -			      upper_32_bits(cpu_addr)); +			      upper_32_bits(parent_bus_addr));  	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,  			      lower_32_bits(limit_addr)); @@ -502,7 +546,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,  			      upper_32_bits(atu->pci_addr));  	val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no); -	if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) && +	if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&  	    dw_pcie_ver_is_ge(pci, 460A))  		val |= PCIE_ATU_INCREASE_REGION_SIZE;  	if (dw_pcie_ver_is(pci, 490A)) @@ -545,13 +589,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg  }  int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, -			     u64 cpu_addr, u64 pci_addr, u64 size) +			     u64 parent_bus_addr, u64 pci_addr, u64 size)  {  	u64 limit_addr = pci_addr + size - 1;  	u32 retries, val;  	if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) || -	    !IS_ALIGNED(cpu_addr, pci->region_align) || +	    !IS_ALIGNED(parent_bus_addr, pci->region_align) ||  	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {  		return -EINVAL;  	} @@ -568,9 +612,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,  				      upper_32_bits(limit_addr));  	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, -			      lower_32_bits(cpu_addr)); +			      lower_32_bits(parent_bus_addr));  	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, -			      upper_32_bits(cpu_addr)); +			      upper_32_bits(parent_bus_addr));  	val = type;  	if (upper_32_bits(limit_addr) 
> upper_32_bits(pci_addr) && @@ -597,18 +641,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,  }  int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, -				int type, u64 cpu_addr, u8 bar, size_t size) +				int type, u64 parent_bus_addr, u8 bar, size_t size)  {  	u32 retries, val; -	if (!IS_ALIGNED(cpu_addr, pci->region_align) || -	    !IS_ALIGNED(cpu_addr, size)) +	if (!IS_ALIGNED(parent_bus_addr, pci->region_align) || +	    !IS_ALIGNED(parent_bus_addr, size))  		return -EINVAL;  	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, -			      lower_32_bits(cpu_addr)); +			      lower_32_bits(parent_bus_addr));  	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, -			      upper_32_bits(cpu_addr)); +			      upper_32_bits(parent_bus_addr));  	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |  			      PCIE_ATU_FUNC_NUM(func_no)); @@ -1105,3 +1149,63 @@ void dw_pcie_setup(struct dw_pcie *pci)  	dw_pcie_link_set_max_link_width(pci, pci->num_lanes);  } + +resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci, +					  const char *reg_name, +					  resource_size_t cpu_phys_addr) +{ +	struct device *dev = pci->dev; +	struct device_node *np = dev->of_node; +	int index; +	u64 reg_addr, fixup_addr; +	u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr); + +	/* Look up reg_name address on parent bus */ +	index = of_property_match_string(np, "reg-names", reg_name); + +	if (index < 0) { +		dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name); +		return 0; +	} + +	of_property_read_reg(np, index, &reg_addr, NULL); + +	fixup = pci->ops ? 
pci->ops->cpu_addr_fixup : NULL; +	if (fixup) { +		fixup_addr = fixup(pci, cpu_phys_addr); +		if (reg_addr == fixup_addr) { +			dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n", +				 reg_name, index, reg_addr, fixup_addr, +				 (unsigned long long) cpu_phys_addr, fixup); +		} else { +			dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n", +				 reg_name, index, reg_addr, fixup_addr, +				 (unsigned long long) cpu_phys_addr); +			reg_addr = fixup_addr; +		} + +		return cpu_phys_addr - reg_addr; +	} + +	if (pci->use_parent_dt_ranges) { + +		/* +		 * This platform once had a fixup, presumably because it +		 * translates between CPU and PCI controller addresses. +		 * Log a note if devicetree didn't describe a translation. +		 */ +		if (reg_addr == cpu_phys_addr) +			dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx; no fixup was ever needed for this devicetree\n", +				 reg_name, index, reg_addr, +				 (unsigned long long) cpu_phys_addr); +	} else { +		if (reg_addr != cpu_phys_addr) { +			dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n", +				 reg_name, index, reg_addr, +				 (unsigned long long) cpu_phys_addr); +			return 0; +		} +	} + +	return cpu_phys_addr - reg_addr; +} diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 501d9ddfea16..56aafdbcdaca 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -330,9 +330,40 @@ enum dw_pcie_ltssm {  	/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */  	DW_PCIE_LTSSM_DETECT_QUIET = 0x0,  	DW_PCIE_LTSSM_DETECT_ACT = 0x1, +	DW_PCIE_LTSSM_POLL_ACTIVE = 0x2, +	DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3, +	DW_PCIE_LTSSM_POLL_CONFIG = 0x4, +	DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,  	DW_PCIE_LTSSM_DETECT_WAIT = 0x6, +	
DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7, +	DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8, +	DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9, +	DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa, +	DW_PCIE_LTSSM_CFG_COMPLETE = 0xb, +	DW_PCIE_LTSSM_CFG_IDLE = 0xc, +	DW_PCIE_LTSSM_RCVRY_LOCK = 0xd, +	DW_PCIE_LTSSM_RCVRY_SPEED = 0xe, +	DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf, +	DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,  	DW_PCIE_LTSSM_L0 = 0x11, +	DW_PCIE_LTSSM_L0S = 0x12, +	DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13, +	DW_PCIE_LTSSM_L1_IDLE = 0x14,  	DW_PCIE_LTSSM_L2_IDLE = 0x15, +	DW_PCIE_LTSSM_L2_WAKE = 0x16, +	DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17, +	DW_PCIE_LTSSM_DISABLED_IDLE = 0x18, +	DW_PCIE_LTSSM_DISABLED = 0x19, +	DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a, +	DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b, +	DW_PCIE_LTSSM_LPBK_EXIT = 0x1c, +	DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d, +	DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e, +	DW_PCIE_LTSSM_HOT_RESET = 0x1f, +	DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20, +	DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21, +	DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22, +	DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,  	DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,  }; @@ -343,7 +374,7 @@ struct dw_pcie_ob_atu_cfg {  	u8 func_no;  	u8 code;  	u8 routing; -	u64 cpu_addr; +	u64 parent_bus_addr;  	u64 pci_addr;  	u64 size;  }; @@ -437,6 +468,11 @@ struct dw_pcie_ops {  	void	(*stop_link)(struct dw_pcie *pcie);  }; +struct debugfs_info { +	struct dentry		*debug_dir; +	void			*rasdes_info; +}; +  struct dw_pcie {  	struct device		*dev;  	void __iomem		*dbi_base; @@ -445,6 +481,7 @@ struct dw_pcie {  	void __iomem		*atu_base;  	resource_size_t		atu_phys_addr;  	size_t			atu_size; +	resource_size_t		parent_bus_offset;  	u32			num_ib_windows;  	u32			num_ob_windows;  	u32			region_align; @@ -465,6 +502,20 @@ struct dw_pcie {  	struct reset_control_bulk_data	core_rsts[DW_PCIE_NUM_CORE_RSTS];  	struct gpio_desc		*pe_rst;  	bool			suspended; +	struct debugfs_info	*debugfs; + +	/* +	 * If iATU input addresses are offset from CPU physical addresses, +	 * we previously required .cpu_addr_fixup() 
to convert them.  We +	 * now rely on the devicetree instead.  If .cpu_addr_fixup() +	 * exists, we compare its results with devicetree. +	 * +	 * If .cpu_addr_fixup() does not exist, we assume the offset is +	 * zero and warn if devicetree claims otherwise.  If we know all +	 * devicetrees correctly describe the offset, set +	 * use_parent_dt_ranges to true to avoid this warning. +	 */ +	bool			use_parent_dt_ranges;  };  #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -478,6 +529,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci);  u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);  u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap); +u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);  int dw_pcie_read(void __iomem *addr, int size, u32 *val);  int dw_pcie_write(void __iomem *addr, int size, u32 val); @@ -491,14 +543,18 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci);  int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,  			      const struct dw_pcie_ob_atu_cfg *atu);  int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, -			     u64 cpu_addr, u64 pci_addr, u64 size); +			     u64 parent_bus_addr, u64 pci_addr, u64 size);  int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, -				int type, u64 cpu_addr, u8 bar, size_t size); +				int type, u64 parent_bus_addr, +				u8 bar, size_t size);  void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);  void dw_pcie_setup(struct dw_pcie *pci);  void dw_pcie_iatu_detect(struct dw_pcie *pci);  int dw_pcie_edma_detect(struct dw_pcie *pci);  void dw_pcie_edma_remove(struct dw_pcie *pci); +resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci, +					  const char *reg_name, +					  resource_size_t cpu_phy_addr);  static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)  { @@ -743,6 +799,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,  int 
dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,  				       u16 interrupt_num);  void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); +int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);  struct dw_pcie_ep_func *  dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);  #else @@ -800,10 +857,29 @@ static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)  {  } +static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, +						 u8 prev_cap, u8 cap) +{ +	return 0; +} +  static inline struct dw_pcie_ep_func *  dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)  {  	return NULL;  }  #endif + +#ifdef CONFIG_PCIE_DW_DEBUGFS +void dwc_pcie_debugfs_init(struct dw_pcie *pci); +void dwc_pcie_debugfs_deinit(struct dw_pcie *pci); +#else +static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci) +{ +} +static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci) +{ +} +#endif +  #endif /* _PCIE_DESIGNWARE_H */ diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 93698abff4d9..c624b7ebd118 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -240,6 +240,34 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {  	.init = rockchip_pcie_host_init,  }; +/* + * ATS does not work on RK3588 when running in EP mode. + * + * After the host has enabled ATS on the EP side, it will send an IOTLB + * invalidation request to the EP side. However, the RK3588 will never send + * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT + * error, and the EP will not be operational. If we hide the ATS capability, + * things work as expected. 
+ */ +static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct device *dev = pci->dev; + +	/* Only hide the ATS capability for RK3588 running in EP mode. */ +	if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep")) +		return; + +	if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI, +					   PCI_EXT_CAP_ID_ATS)) +		dev_err(dev, "failed to hide ATS capability\n"); +} + +static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep) +{ +	rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep); +} +  static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -272,13 +300,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {  	.linkup_notifier = true,  	.msi_capable = true,  	.msix_capable = true, +	.intx_capable = false,  	.align = SZ_64K, -	.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_0] = { .type = BAR_RESIZABLE, }, +	.bar[BAR_1] = { .type = BAR_RESIZABLE, }, +	.bar[BAR_2] = { .type = BAR_RESIZABLE, }, +	.bar[BAR_3] = { .type = BAR_RESIZABLE, }, +	.bar[BAR_4] = { .type = BAR_RESIZABLE, }, +	.bar[BAR_5] = { .type = BAR_RESIZABLE, },  };  /* @@ -292,13 +321,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {  	.linkup_notifier = true,  	.msi_capable = true,  	.msix_capable = true, +	.intx_capable = false,  	.align = SZ_64K, -	.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, -	.bar[BAR_3] = { .type = BAR_FIXED, 
.fixed_size = SZ_1M, }, +	.bar[BAR_0] = { .type = BAR_RESIZABLE, }, +	.bar[BAR_1] = { .type = BAR_RESIZABLE, }, +	.bar[BAR_2] = { .type = BAR_RESIZABLE, }, +	.bar[BAR_3] = { .type = BAR_RESIZABLE, },  	.bar[BAR_4] = { .type = BAR_RESERVED, }, -	.bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, +	.bar[BAR_5] = { .type = BAR_RESIZABLE, },  };  static const struct pci_epc_features * @@ -312,6 +342,7 @@ rockchip_pcie_get_features(struct dw_pcie_ep *ep)  static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {  	.init = rockchip_pcie_ep_init, +	.pre_init = rockchip_pcie_ep_pre_init,  	.raise_irq = rockchip_pcie_raise_irq,  	.get_features = rockchip_pcie_get_features,  }; diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 615a0e3e6d7e..1f2f4c28a949 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev)  	ret = histb_pcie_host_enable(pp);  	if (ret) {  		dev_err(dev, "failed to enable host\n"); -		return ret; +		goto err_exit_phy;  	}  	ret = dw_pcie_host_init(pp);  	if (ret) {  		dev_err(dev, "failed to initialize host\n"); -		return ret; +		goto err_exit_phy;  	}  	return 0; + +err_exit_phy: +	phy_exit(hipcie->phy); + +	return ret;  }  static void histb_pcie_remove(struct platform_device *pdev) @@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev)  	histb_pcie_host_disable(hipcie); -	if (hipcie->phy) -		phy_exit(hipcie->phy); +	phy_exit(hipcie->phy);  }  static const struct of_device_id histb_pcie_of_match[] = { diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 9b53b8f6f268..c21906eced61 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -57,7 +57,6 @@  	PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \  	PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD) -#define 
BUS_IATU_OFFSET			SZ_256M  #define RESET_INTERVAL_MS		100  struct intel_pcie { @@ -381,13 +380,7 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp)  	return intel_pcie_host_setup(pcie);  } -static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr) -{ -	return cpu_addr + BUS_IATU_OFFSET; -} -  static const struct dw_pcie_ops intel_pcie_ops = { -	.cpu_addr_fixup = intel_pcie_cpu_addr,  };  static const struct dw_pcie_host_ops intel_pcie_dw_ops = { @@ -409,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev)  	platform_set_drvdata(pdev, pcie);  	pci = &pcie->pci;  	pci->dev = dev; +	pci->use_parent_dt_ranges = true;  	pp = &pci->pp;  	ret = intel_pcie_get_resources(pdev); diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 1b2088acb538..d0e6a3811b00 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -216,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)  	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);  	reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0); -	if (reg_val & PIPE_CLK_STABLE) { -		dev_err(dev, "PIPE clk is not stable\n"); -		return -EINVAL; -	} +	if (reg_val & PIPE_CLK_STABLE) +		return dev_err_probe(dev, -ETIMEDOUT, +				     "PIPE clk is not stable\n");  	return 0;  } @@ -371,10 +370,9 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,  	if (ret < 0)  		return 0; -	if (ret > MAX_PCI_SLOTS) { -		dev_err(dev, "Too many GPIO clock requests!\n"); -		return -EINVAL; -	} +	if (ret > MAX_PCI_SLOTS) +		return dev_err_probe(dev, -EINVAL, +				     "Too many GPIO clock requests!\n");  	pcie->n_gpio_clkreq = ret; @@ -420,17 +418,16 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,  						     "unable to get a valid reset gpio\n");  			} -			if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) { -				dev_err(dev, "Too many PCI slots!\n"); -				return -EINVAL; -			} +			if (pcie->num_slots 
+ 1 >= MAX_PCI_SLOTS) +				return dev_err_probe(dev, -EINVAL, +						     "Too many PCI slots!\n"); +  			pcie->num_slots++;  			ret = of_pci_get_devfn(child); -			if (ret < 0) { -				dev_err(dev, "failed to parse devfn: %d\n", ret); -				return ret; -			} +			if (ret < 0) +				return dev_err_probe(dev, ret, +						     "failed to parse devfn\n");  			slot = PCI_SLOT(ret); @@ -452,7 +449,7 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,  				    struct platform_device *pdev)  {  	struct device *dev = &pdev->dev; -	struct device_node *child, *node = dev->of_node; +	struct device_node *node = dev->of_node;  	void __iomem *apb_base;  	int ret; @@ -477,17 +474,13 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,  		return ret;  	/* Parse OF children */ -	for_each_available_child_of_node(node, child) { +	for_each_available_child_of_node_scoped(node, child) {  		ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);  		if (ret) -			goto put_node; +			return ret;  	}  	return 0; - -put_node: -	of_node_put(child); -	return ret;  }  static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, @@ -729,16 +722,9 @@ static int kirin_pcie_probe(struct platform_device *pdev)  	struct dw_pcie *pci;  	int ret; -	if (!dev->of_node) { -		dev_err(dev, "NULL node\n"); -		return -EINVAL; -	} -  	data = of_device_get_match_data(dev); -	if (!data) { -		dev_err(dev, "OF data missing\n"); -		return -EINVAL; -	} +	if (!data) +		return dev_err_probe(dev, -EINVAL, "OF data missing\n");  	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);  	if (!kirin_pcie) diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index c08f64d7a825..46b1c6d19974 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -48,7 +48,7 @@  #define PARF_DBI_BASE_ADDR_HI			0x354  #define PARF_SLV_ADDR_SPACE_SIZE		0x358  #define PARF_SLV_ADDR_SPACE_SIZE_HI		
0x35c -#define PARF_NO_SNOOP_OVERIDE			0x3d4 +#define PARF_NO_SNOOP_OVERRIDE			0x3d4  #define PARF_ATU_BASE_ADDR			0x634  #define PARF_ATU_BASE_ADDR_HI			0x638  #define PARF_SRIS_MODE				0x644 @@ -89,9 +89,9 @@  #define PARF_DEBUG_INT_CFG_BUS_MASTER_EN	BIT(2)  #define PARF_DEBUG_INT_RADM_PM_TURNOFF		BIT(3) -/* PARF_NO_SNOOP_OVERIDE register fields */ -#define WR_NO_SNOOP_OVERIDE_EN                 BIT(1) -#define RD_NO_SNOOP_OVERIDE_EN                 BIT(3) +/* PARF_NO_SNOOP_OVERRIDE register fields */ +#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1) +#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)  /* PARF_DEVICE_TYPE register fields */  #define PARF_DEVICE_TYPE_EP			0x0 @@ -529,8 +529,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)  	writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);  	if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop) -		writel_relaxed(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN, -				pcie_ep->parf + PARF_NO_SNOOP_OVERIDE); +		writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN, +				pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);  	return 0; @@ -825,6 +825,10 @@ static const struct pci_epc_features qcom_pcie_epc_features = {  	.msi_capable = true,  	.msix_capable = false,  	.align = SZ_4K, +	.bar[BAR_0] = { .only_64bit = true, }, +	.bar[BAR_1] = { .type = BAR_RESERVED, }, +	.bar[BAR_2] = { .only_64bit = true, }, +	.bar[BAR_3] = { .type = BAR_RESERVED, },  };  static const struct pci_epc_features * @@ -933,6 +937,7 @@ static const struct of_device_id qcom_pcie_ep_match[] = {  	{ .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},  	{ .compatible = "qcom,sdx55-pcie-ep", },  	{ .compatible = "qcom,sm8450-pcie-ep", }, +	{ .compatible = "qcom,sar2130p-pcie-ep", },  	{ }  };  MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index e4d3366ead1f..dc98ae63362d 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ 
b/drivers/pci/controller/dwc/pcie-qcom.c @@ -61,7 +61,7 @@  #define PARF_DBI_BASE_ADDR_V2_HI		0x354  #define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358  #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c -#define PARF_NO_SNOOP_OVERIDE			0x3d4 +#define PARF_NO_SNOOP_OVERRIDE			0x3d4  #define PARF_ATU_BASE_ADDR			0x634  #define PARF_ATU_BASE_ADDR_HI			0x638  #define PARF_DEVICE_TYPE			0x1000 @@ -135,9 +135,9 @@  #define PARF_INT_ALL_LINK_UP			BIT(13)  #define PARF_INT_MSI_DEV_0_7			GENMASK(30, 23) -/* PARF_NO_SNOOP_OVERIDE register fields */ -#define WR_NO_SNOOP_OVERIDE_EN			BIT(1) -#define RD_NO_SNOOP_OVERIDE_EN			BIT(3) +/* PARF_NO_SNOOP_OVERRIDE register fields */ +#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1) +#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)  /* PARF_DEVICE_TYPE register fields */  #define DEVICE_TYPE_RC				0x4 @@ -1007,8 +1007,8 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)  	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;  	if (pcie_cfg->override_no_snoop) -		writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN, -				pcie->parf + PARF_NO_SNOOP_OVERIDE); +		writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN, +				pcie->parf + PARF_NO_SNOOP_OVERRIDE);  	qcom_pcie_clear_aspm_l0s(pcie->pci);  	qcom_pcie_clear_hpc(pcie->pci); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 6084b38bdda1..ac27bda5ba26 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1356,7 +1356,7 @@ static struct pci_ops hv_pcifront_ops = {   *   * If the PF driver wishes to initiate communication, it can "invalidate" one or   * more of the first 64 blocks.  This invalidation is delivered via a callback - * supplied by the VF driver by this driver. + * supplied to the VF driver by this driver.   *   * No protocol is implied, except that supplied by the PF and VF drivers.   
*/ @@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)  	spin_lock_irqsave(&multi_msi_cpu_lock, flags); -	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids, -				     false); +	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);  	cpu = cpu_next;  	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags); diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 665f35f9d826..b0e3bce10aa4 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1422,7 +1422,7 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)  }  /* - * devm_of_pci_get_host_bridge_resources() only sets up translateable resources, + * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,   * so we need extra resource setup parsing our special DT properties encoding   * the MEM and IO apertures.   */ diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index b3cdbc5927de..d2f88997283a 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2106,47 +2106,39 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)  static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)  {  	struct device *dev = pcie->dev; -	struct device_node *np = dev->of_node, *port; +	struct device_node *np = dev->of_node;  	const struct tegra_pcie_soc *soc = pcie->soc;  	u32 lanes = 0, mask = 0;  	unsigned int lane = 0;  	int err;  	/* parse root ports */ -	for_each_child_of_node(np, port) { +	for_each_child_of_node_scoped(np, port) {  		struct tegra_pcie_port *rp;  		unsigned int index;  		u32 value;  		char *label;  		err = of_pci_get_devfn(port); -		if (err < 0) { -			dev_err(dev, "failed to parse address: %d\n", err); -			goto err_node_put; -		} +		if (err < 0) +			return dev_err_probe(dev, err, "failed to parse address\n");  		index = PCI_SLOT(err); -		if (index < 1 || index > soc->num_ports) { 
-			dev_err(dev, "invalid port number: %d\n", index); -			err = -EINVAL; -			goto err_node_put; -		} +		if (index < 1 || index > soc->num_ports) +			return dev_err_probe(dev, -EINVAL, +					     "invalid port number: %d\n", index);  		index--;  		err = of_property_read_u32(port, "nvidia,num-lanes", &value); -		if (err < 0) { -			dev_err(dev, "failed to parse # of lanes: %d\n", -				err); -			goto err_node_put; -		} +		if (err < 0) +			return dev_err_probe(dev, err, +					     "failed to parse # of lanes\n"); -		if (value > 16) { -			dev_err(dev, "invalid # of lanes: %u\n", value); -			err = -EINVAL; -			goto err_node_put; -		} +		if (value > 16) +			return dev_err_probe(dev, -EINVAL, +					     "invalid # of lanes: %u\n", value);  		lanes |= value << (index << 3); @@ -2159,16 +2151,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)  		lane += value;  		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); -		if (!rp) { -			err = -ENOMEM; -			goto err_node_put; -		} +		if (!rp) +			return -ENOMEM;  		err = of_address_to_resource(port, 0, &rp->regs); -		if (err < 0) { -			dev_err(dev, "failed to parse address: %d\n", err); -			goto err_node_put; -		} +		if (err < 0) +			return dev_err_probe(dev, err, "failed to parse address\n");  		INIT_LIST_HEAD(&rp->list);  		rp->index = index; @@ -2177,16 +2165,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)  		rp->np = port;  		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); -		if (IS_ERR(rp->base)) { -			err = PTR_ERR(rp->base); -			goto err_node_put; -		} +		if (IS_ERR(rp->base)) +			return PTR_ERR(rp->base);  		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index); -		if (!label) { -			err = -ENOMEM; -			goto err_node_put; -		} +		if (!label) +			return -ENOMEM;  		/*  		 * Returns -ENOENT if reset-gpios property is not populated @@ -2199,34 +2183,26 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)  						       GPIOD_OUT_LOW,  						       label);  		if (IS_ERR(rp->reset_gpio)) 
{ -			if (PTR_ERR(rp->reset_gpio) == -ENOENT) { +			if (PTR_ERR(rp->reset_gpio) == -ENOENT)  				rp->reset_gpio = NULL; -			} else { -				dev_err(dev, "failed to get reset GPIO: %ld\n", -					PTR_ERR(rp->reset_gpio)); -				err = PTR_ERR(rp->reset_gpio); -				goto err_node_put; -			} +			else +				return dev_err_probe(dev, PTR_ERR(rp->reset_gpio), +						     "failed to get reset GPIO\n");  		}  		list_add_tail(&rp->list, &pcie->ports);  	}  	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config); -	if (err < 0) { -		dev_err(dev, "invalid lane configuration\n"); -		return err; -	} +	if (err < 0) +		return dev_err_probe(dev, err, +				     "invalid lane configuration\n");  	err = tegra_pcie_get_regulators(pcie, mask);  	if (err < 0)  		return err;  	return 0; - -err_node_put: -	of_node_put(port); -	return err;  }  /* diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c index b5bd10a62adb..08161065a89c 100644 --- a/drivers/pci/controller/pci-thunder-ecam.c +++ b/drivers/pci/controller/pci-thunder-ecam.c @@ -204,7 +204,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,  			v = readl(addr);  			if (v & 0xff00) -				pr_err("Bad MSIX cap header: %08x\n", v); +				pr_err("Bad MSI-X cap header: %08x\n", v);  			v |= 0xbc00; /* next capability is EA at 0xbc */  			set_val(v, where, size, val);  			return PCIBIOS_SUCCESSFUL; diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index 88c0977bc41a..7bce327897c9 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -154,7 +154,7 @@ static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)   * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors.  To maintain   * the expected behaviour of .set_affinity for each MSI interrupt, the 16   * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs - * for each core).  
The MSI vector is moved fom 1 MSI GIC IRQ to another + * for each core).  The MSI vector is moved from 1 MSI GIC IRQ to another   * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core.  As a   * consequence, the total MSI vectors that X-Gene v1 supports will be   * reduced to 256 (2048/8) vectors. diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index eb55a7f8573a..70409e71a18f 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -6,6 +6,7 @@   * Description: Altera PCIe host controller driver   */ +#include <linux/bitfield.h>  #include <linux/delay.h>  #include <linux/interrupt.h>  #include <linux/irqchip/chained_irq.h> @@ -77,9 +78,25 @@  #define S10_TLP_FMTTYPE_CFGWR0		0x45  #define S10_TLP_FMTTYPE_CFGWR1		0x44 +#define AGLX_RP_CFG_ADDR(pcie, reg)	(((pcie)->hip_base) + (reg)) +#define AGLX_RP_SECONDARY(pcie)		\ +	readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS)) + +#define AGLX_BDF_REG			0x00002004 +#define AGLX_ROOT_PORT_IRQ_STATUS	0x14c +#define AGLX_ROOT_PORT_IRQ_ENABLE	0x150 +#define CFG_AER				BIT(4) + +#define AGLX_CFG_TARGET			GENMASK(13, 12) +#define AGLX_CFG_TARGET_TYPE0		0 +#define AGLX_CFG_TARGET_TYPE1		1 +#define AGLX_CFG_TARGET_LOCAL_2000	2 +#define AGLX_CFG_TARGET_LOCAL_3000	3 +  enum altera_pcie_version {  	ALTERA_PCIE_V1 = 0,  	ALTERA_PCIE_V2, +	ALTERA_PCIE_V3,  };  struct altera_pcie { @@ -102,6 +119,11 @@ struct altera_pcie_ops {  			   int size, u32 *value);  	int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,  			    int where, int size, u32 value); +	int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno, +			   unsigned int devfn, int where, int size, u32 *value); +	int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno, +			    unsigned int devfn, int where, int size, u32 value); +	void (*rp_isr)(struct irq_desc *desc);  };  struct altera_pcie_data { @@ -112,6 +134,9 @@ struct altera_pcie_data {  	u32 cfgrd1;  	u32 cfgwr0;  	u32 cfgwr1; +	u32 
port_conf_offset; +	u32 port_irq_status_offset; +	u32 port_irq_enable_offset;  };  struct tlp_rp_regpair_t { @@ -131,6 +156,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)  	return readl_relaxed(pcie->cra_base + reg);  } +static inline void cra_writew(struct altera_pcie *pcie, const u32 value, +			      const u32 reg) +{ +	writew_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg) +{ +	return readw_relaxed(pcie->cra_base + reg); +} + +static inline void cra_writeb(struct altera_pcie *pcie, const u32 value, +			      const u32 reg) +{ +	writeb_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg) +{ +	return readb_relaxed(pcie->cra_base + reg); +} +  static bool altera_pcie_link_up(struct altera_pcie *pcie)  {  	return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); @@ -145,11 +192,20 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)  	return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);  } +static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie) +{ +	void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, +				   pcie->pcie_data->cap_offset + +				   PCI_EXP_LNKSTA); + +	return (readw_relaxed(addr) & PCI_EXP_LNKSTA_DLLLA); +} +  /*   * Altera PCIe port uses BAR0 of RC's configuration space as the translation   * from PCI bus to native BUS.  Entire DDR region is mapped into PCIe space   * using these registers, so it can be reached by DMA from EP devices. - * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt + * This BAR0 will also access to MSI vector when receiving MSI/MSI-X interrupt   * from EP devices, eventually trigger interrupt to GIC.  The BAR0 of bridge   * should be hidden during enumeration to avoid the sizing and resource   * allocation by PCIe core. 
@@ -425,6 +481,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,  	return PCIBIOS_SUCCESSFUL;  } +static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where, +			    int size, u32 *value) +{ +	void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where); + +	switch (size) { +	case 1: +		*value = readb_relaxed(addr); +		break; +	case 2: +		*value = readw_relaxed(addr); +		break; +	default: +		*value = readl_relaxed(addr); +		break; +	} + +	/* Interrupt PIN not programmed in hardware, set to INTA. */ +	if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value)) +		*value = 0x01; +	else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00)) +		*value |= 0x0100; + +	return PCIBIOS_SUCCESSFUL; +} + +static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno, +			     int where, int size, u32 value) +{ +	void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where); + +	switch (size) { +	case 1: +		writeb_relaxed(value, addr); +		break; +	case 2: +		writew_relaxed(value, addr); +		break; +	default: +		writel_relaxed(value, addr); +		break; +	} + +	/* +	 * Monitor changes to PCI_PRIMARY_BUS register on Root Port +	 * and update local copy of root bus number accordingly. 
+	 */
+	if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+		pcie->root_bus_nr = value & 0xff;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno,
+			     unsigned int devfn, int where, int size, u32 value)
+{
+	cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+	if (busno > AGLX_RP_SECONDARY(pcie))
+		where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+	switch (size) {
+	case 1:
+		cra_writeb(pcie, value, where);
+		break;
+	case 2:
+		cra_writew(pcie, value, where);
+		break;
+	default:
+		cra_writel(pcie, value, where);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno,
+			    unsigned int devfn, int where, int size, u32 *value)
+{
+	cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+	if (busno > AGLX_RP_SECONDARY(pcie))
+		where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+	switch (size) {
+	case 1:
+		*value = cra_readb(pcie, where);
+		break;
+	case 2:
+		*value = cra_readw(pcie, where);
+		break;
+	default:
+		*value = cra_readl(pcie, where);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
 static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
 				 unsigned int devfn, int where, int size,
 				 u32 *value)
@@ -437,6 +590,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
 		return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
 							 size, value);
 
+	if (pcie->pcie_data->ops->ep_read_cfg)
+		return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn,
+							where, size, value);
+
 	switch (size) {
 	case 1:
 		byte_en = 1 << (where & 3);
@@ -481,6 +638,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
 		return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
 						     where, size, value);
 
+	if (pcie->pcie_data->ops->ep_write_cfg)
+		return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn,
+						     where, size, value);
+  	switch (size) {  	case 1:  		data32 = (value & 0xff) << shift; @@ -659,7 +820,32 @@ static void altera_pcie_isr(struct irq_desc *desc)  				dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);  		}  	} +	chained_irq_exit(chip, desc); +} + +static void aglx_isr(struct irq_desc *desc) +{ +	struct irq_chip *chip = irq_desc_get_chip(desc); +	struct altera_pcie *pcie; +	struct device *dev; +	u32 status; +	int ret; + +	chained_irq_enter(chip, desc); +	pcie = irq_desc_get_handler_data(desc); +	dev = &pcie->pdev->dev; + +	status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset + +		       pcie->pcie_data->port_irq_status_offset); +	if (status & CFG_AER) { +		writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset + +				 pcie->pcie_data->port_irq_status_offset)); + +		ret = generic_handle_domain_irq(pcie->irq_domain, 0); +		if (ret) +			dev_err_ratelimited(dev, "unexpected IRQ %d\n", pcie->irq); +	}  	chained_irq_exit(chip, desc);  } @@ -694,9 +880,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)  	if (IS_ERR(pcie->cra_base))  		return PTR_ERR(pcie->cra_base); -	if (pcie->pcie_data->version == ALTERA_PCIE_V2) { -		pcie->hip_base = -			devm_platform_ioremap_resource_byname(pdev, "Hip"); +	if (pcie->pcie_data->version == ALTERA_PCIE_V2 || +	    pcie->pcie_data->version == ALTERA_PCIE_V3) { +		pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip");  		if (IS_ERR(pcie->hip_base))  			return PTR_ERR(pcie->hip_base);  	} @@ -706,7 +892,7 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)  	if (pcie->irq < 0)  		return pcie->irq; -	irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); +	irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie);  	return 0;  } @@ -719,6 +905,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = {  	.tlp_read_pkt = tlp_read_packet,  	.tlp_write_pkt = tlp_write_packet,  	.get_link_status = altera_pcie_link_up, +	.rp_isr = altera_pcie_isr, 
 };  static const struct altera_pcie_ops altera_pcie_ops_2_0 = { @@ -727,6 +914,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = {  	.get_link_status = s10_altera_pcie_link_up,  	.rp_read_cfg = s10_rp_read_cfg,  	.rp_write_cfg = s10_rp_write_cfg, +	.rp_isr = altera_pcie_isr, +}; + +static const struct altera_pcie_ops altera_pcie_ops_3_0 = { +	.rp_read_cfg = aglx_rp_read_cfg, +	.rp_write_cfg = aglx_rp_write_cfg, +	.get_link_status = aglx_altera_pcie_link_up, +	.ep_read_cfg = aglx_ep_read_cfg, +	.ep_write_cfg = aglx_ep_write_cfg, +	.rp_isr = aglx_isr,  };  static const struct altera_pcie_data altera_pcie_1_0_data = { @@ -749,11 +946,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = {  	.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,  }; +static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = { +	.ops = &altera_pcie_ops_3_0, +	.version = ALTERA_PCIE_V3, +	.cap_offset = 0x70, +	.port_conf_offset = 0x14000, +	.port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS, +	.port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE, +}; + +static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = { +	.ops = &altera_pcie_ops_3_0, +	.version = ALTERA_PCIE_V3, +	.cap_offset = 0x70, +	.port_conf_offset = 0x104000, +	.port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS, +	.port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE, +}; + +static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = { +	.ops = &altera_pcie_ops_3_0, +	.version = ALTERA_PCIE_V3, +	.cap_offset = 0x70, +	.port_conf_offset = 0x1300, +	.port_irq_status_offset = 0x0, +	.port_irq_enable_offset = 0x4, +}; +  static const struct of_device_id altera_pcie_of_match[] = {  	{.compatible = "altr,pcie-root-port-1.0",  	 .data = &altera_pcie_1_0_data },  	{.compatible = "altr,pcie-root-port-2.0",  	 .data = &altera_pcie_2_0_data }, +	{.compatible = "altr,pcie-root-port-3.0-f-tile", +	 .data = &altera_pcie_3_0_f_tile_data }, +	{.compatible = "altr,pcie-root-port-3.0-p-tile", +	 .data = 
&altera_pcie_3_0_p_tile_data }, +	{.compatible = "altr,pcie-root-port-3.0-r-tile", +	 .data = &altera_pcie_3_0_r_tile_data },  	{},  }; @@ -791,11 +1021,18 @@ static int altera_pcie_probe(struct platform_device *pdev)  		return ret;  	} -	/* clear all interrupts */ -	cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); -	/* enable all interrupts */ -	cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); -	altera_pcie_host_init(pcie); +	if (pcie->pcie_data->version == ALTERA_PCIE_V1 || +	    pcie->pcie_data->version == ALTERA_PCIE_V2) { +		/* clear all interrupts */ +		cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); +		/* enable all interrupts */ +		cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); +		altera_pcie_host_init(pcie); +	} else if (pcie->pcie_data->version == ALTERA_PCIE_V3) { +		writel(CFG_AER, +		       pcie->hip_base + pcie->pcie_data->port_conf_offset + +		       pcie->pcie_data->port_irq_enable_offset); +	}  	bridge->sysdata = pcie;  	bridge->busnr = pcie->root_bus_nr; diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index a7e51bc1c2fe..18e11b9a7f46 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -732,7 +732,6 @@ static int apple_pcie_init(struct pci_config_window *cfg)  {  	struct device *dev = cfg->parent;  	struct platform_device *platform = to_platform_device(dev); -	struct device_node *of_port;  	struct apple_pcie *pcie;  	int ret; @@ -755,11 +754,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)  	if (ret)  		return ret; -	for_each_child_of_node(dev->of_node, of_port) { +	for_each_child_of_node_scoped(dev->of_node, of_port) {  		ret = apple_pcie_setup_port(pcie, of_port);  		if (ret) {  			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret); -			of_node_put(of_port);  			return ret;  		}  	} diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e733a27dc8df..e19628e13898 100644 --- 
a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -40,7 +40,7 @@  /* Broadcom STB PCIe Register Offsets */  #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1				0x0188  #define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc -#define  PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN			0x0 +#define  PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN			0x0  #define PCIE_RC_CFG_PRIV1_ID_VAL3			0x043c  #define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff @@ -55,6 +55,10 @@  #define PCIE_RC_DL_MDIO_WR_DATA				0x1104  #define PCIE_RC_DL_MDIO_RD_DATA				0x1108 +#define PCIE_RC_PL_PHY_CTL_15				0x184c +#define  PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK		0x400000 +#define  PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK	0xff +  #define PCIE_MISC_MISC_CTRL				0x4008  #define  PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK	0x80  #define  PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK	0x400 @@ -146,9 +150,6 @@  #define  MSI_INT_MASK_SET		0x10  #define  MSI_INT_MASK_CLR		0x14 -#define PCIE_EXT_CFG_DATA				0x8000 -#define PCIE_EXT_CFG_INDEX				0x9000 -  #define  PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1  #define  PCIE_RGR1_SW_INIT_1_PERST_SHIFT		0x0 @@ -174,8 +175,9 @@  #define MDIO_PORT0			0x0  #define MDIO_DATA_MASK			0x7fffffff  #define MDIO_PORT_MASK			0xf0000 +#define MDIO_PORT_EXT_MASK		0x200000  #define MDIO_REGAD_MASK			0xffff -#define MDIO_CMD_MASK			0xfff00000 +#define MDIO_CMD_MASK			0x00100000  #define MDIO_CMD_READ			0x1  #define MDIO_CMD_WRITE			0x0  #define MDIO_DATA_DONE_MASK		0x80000000 @@ -191,11 +193,11 @@  #define SSC_STATUS_PLL_LOCK_MASK	0x800  #define PCIE_BRCM_MAX_MEMC		3 -#define IDX_ADDR(pcie)			((pcie)->reg_offsets[EXT_CFG_INDEX]) -#define DATA_ADDR(pcie)			((pcie)->reg_offsets[EXT_CFG_DATA]) -#define PCIE_RGR1_SW_INIT_1(pcie)	((pcie)->reg_offsets[RGR1_SW_INIT_1]) -#define HARD_DEBUG(pcie)		((pcie)->reg_offsets[PCIE_HARD_DEBUG]) -#define INTR2_CPU_BASE(pcie)		((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE]) +#define 
IDX_ADDR(pcie)			((pcie)->cfg->offsets[EXT_CFG_INDEX]) +#define DATA_ADDR(pcie)			((pcie)->cfg->offsets[EXT_CFG_DATA]) +#define PCIE_RGR1_SW_INIT_1(pcie)	((pcie)->cfg->offsets[RGR1_SW_INIT_1]) +#define HARD_DEBUG(pcie)		((pcie)->cfg->offsets[PCIE_HARD_DEBUG]) +#define INTR2_CPU_BASE(pcie)		((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])  /* Rescal registers */  #define PCIE_DVT_PMU_PCIE_PHY_CTRL				0xc700 @@ -234,13 +236,24 @@ struct inbound_win {  	u64 cpu_addr;  }; +/* + * The RESCAL block is tied to PCIe controller #1, regardless of the number of + * controllers, and turning off PCIe controller #1 prevents access to the RESCAL + * register blocks, therefore no other controller can access this register + * space, and depending upon the bus fabric we may get a timeout (UBUS/GISB), + * or a hang (AXI). + */ +#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN		BIT(0) +  struct pcie_cfg_data {  	const int *offsets;  	const enum pcie_soc_base soc_base;  	const bool has_phy; +	const u32 quirks;  	u8 num_inbound_wins;  	int (*perst_set)(struct brcm_pcie *pcie, u32 val);  	int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); +	int (*post_setup)(struct brcm_pcie *pcie);  };  struct subdev_regulators { @@ -276,8 +289,6 @@ struct brcm_pcie {  	int			gen;  	u64			msi_target_addr;  	struct brcm_msi		*msi; -	const int		*reg_offsets; -	enum pcie_soc_base	soc_base;  	struct reset_control	*rescal;  	struct reset_control	*perst_reset;  	struct reset_control	*bridge_reset; @@ -285,17 +296,14 @@ struct brcm_pcie {  	int			num_memc;  	u64			memc_size[PCIE_BRCM_MAX_MEMC];  	u32			hw_rev; -	int			(*perst_set)(struct brcm_pcie *pcie, u32 val); -	int			(*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);  	struct subdev_regulators *sr;  	bool			ep_wakeup_capable; -	bool			has_phy; -	u8			num_inbound_wins; +	const struct pcie_cfg_data	*cfg;  };  static inline bool is_bmips(const struct brcm_pcie *pcie)  { -	return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425; +	return 
pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;  }  /* @@ -309,8 +317,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)  	if (log2_in >= 12 && log2_in <= 15)  		/* Covers 4KB to 32KB (inclusive) */  		return (log2_in - 12) + 0x1c; -	else if (log2_in >= 16 && log2_in <= 35) -		/* Covers 64KB to 32GB, (inclusive) */ +	else if (log2_in >= 16 && log2_in <= 36) +		/* Covers 64KB to 64GB, (inclusive) */  		return log2_in - 15;  	/* Something is awry so disable */  	return 0; @@ -320,6 +328,7 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)  {  	u32 pkt = 0; +	pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);  	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);  	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);  	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd); @@ -403,12 +412,12 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)  static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)  {  	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); -	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); +	u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); -	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen; -	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); +	u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS); +	writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); -	lnkctl2 = (lnkctl2 & ~0xf) | gen; +	u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);  	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);  } @@ -550,7 +559,7 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,  		return hwirq;  	for (i = 0; i < nr_irqs; i++) -		irq_domain_set_info(domain, virq + i, hwirq + i, +		irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,  				    &brcm_msi_bottom_irq_chip, domain->host_data,  				    handle_edge_irq, NULL, NULL);  	return 0; @@ -717,8 +726,8 @@ static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,  	
/* For devices, write to the config space index register */  	idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0); -	writel(idx, pcie->base + PCIE_EXT_CFG_INDEX); -	return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where); +	writel(idx, base + IDX_ADDR(pcie)); +	return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);  }  static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus, @@ -821,6 +830,39 @@ static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)  	return 0;  } +static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie) +{ +	static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030, +				    0x5030, 0x0007 }; +	static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e }; +	int ret, i; +	u32 tmp; + +	/* Allow a 54MHz (xosc) refclk source */ +	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600); +	if (ret < 0) +		return ret; + +	for (i = 0; i < ARRAY_SIZE(regs); i++) { +		ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]); +		if (ret < 0) +			return ret; +	} + +	usleep_range(100, 200); + +	/* +	 * Set L1SS sub-state timers to avoid lengthy state transitions, +	 * PM clock period is 18.52ns (1/54MHz, round down). +	 */ +	tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15); +	tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK; +	tmp |= 0x12; +	writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15); + +	return 0; +} +  static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,  			    u64 cpu_addr, u64 pci_offset)  { @@ -855,7 +897,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,  	 * security considerations, and is not implemented in our modern  	 * SoCs.  	 
*/ -	if (pcie->soc_base != BCM7712) +	if (pcie->cfg->soc_base != BCM7712)  		add_inbound_win(b++, &n, 0, 0, 0);  	resource_list_for_each_entry(entry, &bridge->dma_ranges) { @@ -872,10 +914,10 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,  		 * That being said, each BARs size must still be a power of  		 * two.  		 */ -		if (pcie->soc_base == BCM7712) +		if (pcie->cfg->soc_base == BCM7712)  			add_inbound_win(b++, &n, size, cpu_start, pcie_start); -		if (n > pcie->num_inbound_wins) +		if (n > pcie->cfg->num_inbound_wins)  			break;  	} @@ -889,7 +931,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,  	 * that enables multiple memory controllers.  As such, it can return  	 * now w/o doing special configuration.  	 */ -	if (pcie->soc_base == BCM7712) +	if (pcie->cfg->soc_base == BCM7712)  		return n;  	ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1, @@ -1012,7 +1054,7 @@ static void set_inbound_win_registers(struct brcm_pcie *pcie,  		 * 7712:  		 *     All of their BARs need to be set.  		 */ -		if (pcie->soc_base == BCM7712) { +		if (pcie->cfg->soc_base == BCM7712) {  			/* BUS remap register settings */  			reg_offset = brcm_ubus_reg_offset(i);  			tmp = lower_32_bits(cpu_addr) & ~0xfff; @@ -1036,15 +1078,15 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	int memc, ret;  	/* Reset the bridge */ -	ret = pcie->bridge_sw_init_set(pcie, 1); +	ret = pcie->cfg->bridge_sw_init_set(pcie, 1);  	if (ret)  		return ret;  	/* Ensure that PERST# is asserted; some bootloaders may deassert it. 
*/ -	if (pcie->soc_base == BCM2711) { -		ret = pcie->perst_set(pcie, 1); +	if (pcie->cfg->soc_base == BCM2711) { +		ret = pcie->cfg->perst_set(pcie, 1);  		if (ret) { -			pcie->bridge_sw_init_set(pcie, 0); +			pcie->cfg->bridge_sw_init_set(pcie, 0);  			return ret;  		}  	} @@ -1052,7 +1094,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	usleep_range(100, 200);  	/* Take the bridge out of reset */ -	ret = pcie->bridge_sw_init_set(pcie, 0); +	ret = pcie->cfg->bridge_sw_init_set(pcie, 0);  	if (ret)  		return ret; @@ -1072,9 +1114,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	 */  	if (is_bmips(pcie))  		burst = 0x1; /* 256 bytes */ -	else if (pcie->soc_base == BCM2711) +	else if (pcie->cfg->soc_base == BCM2711)  		burst = 0x0; /* 128 bytes */ -	else if (pcie->soc_base == BCM7278) +	else if (pcie->cfg->soc_base == BCM7278)  		burst = 0x3; /* 512 bytes */  	else  		burst = 0x2; /* 512 bytes */ @@ -1180,10 +1222,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	/* PCIe->SCB endian mode for inbound window */  	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); -	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, +	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,  		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);  	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); +	if (pcie->cfg->post_setup) { +		ret = pcie->cfg->post_setup(pcie); +		if (ret < 0) +			return ret; +	} +  	return 0;  } @@ -1199,7 +1247,7 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)  	u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */  	/* 7712 does not have this (RGR1) timer */ -	if (pcie->soc_base == BCM7712) +	if (pcie->cfg->soc_base == BCM7712)  		return;  	/* Each unit in timeout register is 1/216,000,000 seconds */ @@ -1276,8 +1324,12 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)  	bool ssc_good = false;  	int ret, i; +	/* Limit the generation if 
specified */ +	if (pcie->gen) +		brcm_pcie_set_gen(pcie, pcie->gen); +  	/* Unassert the fundamental reset */ -	ret = pcie->perst_set(pcie, 0); +	ret = pcie->cfg->perst_set(pcie, 0);  	if (ret)  		return ret; @@ -1302,9 +1354,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)  	brcm_config_clkreq(pcie); -	if (pcie->gen) -		brcm_pcie_set_gen(pcie, pcie->gen); -  	if (pcie->ssc) {  		ret = brcm_pcie_set_ssc(pcie);  		if (ret == 0) @@ -1367,7 +1416,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)  		ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);  		if (ret) { -			dev_info(dev, "No regulators for downstream device\n"); +			dev_info(dev, "Did not get regulators, err=%d\n", ret); +			pcie->sr = NULL;  			goto no_regulators;  		} @@ -1390,7 +1440,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus)  	struct subdev_regulators *sr = pcie->sr;  	struct device *dev = &bus->dev; -	if (!sr) +	if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))  		return;  	if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) @@ -1463,12 +1513,12 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)  static inline int brcm_phy_start(struct brcm_pcie *pcie)  { -	return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0; +	return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;  }  static inline int brcm_phy_stop(struct brcm_pcie *pcie)  { -	return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0; +	return pcie->cfg->has_phy ? 
brcm_phy_cntl(pcie, 0) : 0;  }  static int brcm_pcie_turn_off(struct brcm_pcie *pcie) @@ -1479,7 +1529,7 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie)  	if (brcm_pcie_link_up(pcie))  		brcm_pcie_enter_l23(pcie);  	/* Assert fundamental reset */ -	ret = pcie->perst_set(pcie, 1); +	ret = pcie->cfg->perst_set(pcie, 1);  	if (ret)  		return ret; @@ -1493,8 +1543,9 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie)  	u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);  	writel(tmp, base + HARD_DEBUG(pcie)); -	/* Shutdown PCIe bridge */ -	ret = pcie->bridge_sw_init_set(pcie, 1); +	if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN)) +		/* Shutdown PCIe bridge */ +		ret = pcie->cfg->bridge_sw_init_set(pcie, 1);  	return ret;  } @@ -1582,7 +1633,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)  		goto err_reset;  	/* Take bridge out of reset so we can access the SERDES reg */ -	pcie->bridge_sw_init_set(pcie, 0); +	pcie->cfg->bridge_sw_init_set(pcie, 0);  	/* SERDES_IDDQ = 0 */  	tmp = readl(base + HARD_DEBUG(pcie)); @@ -1660,7 +1711,7 @@ static void brcm_pcie_remove(struct platform_device *pdev)  static const int pcie_offsets[] = {  	[RGR1_SW_INIT_1]	= 0x9210,  	[EXT_CFG_INDEX]		= 0x9000, -	[EXT_CFG_DATA]		= 0x9004, +	[EXT_CFG_DATA]		= 0x8000,  	[PCIE_HARD_DEBUG]	= 0x4204,  	[PCIE_INTR2_CPU_BASE]	= 0x4300,  }; @@ -1668,7 +1719,7 @@ static const int pcie_offsets[] = {  static const int pcie_offsets_bcm7278[] = {  	[RGR1_SW_INIT_1]	= 0xc010,  	[EXT_CFG_INDEX]		= 0x9000, -	[EXT_CFG_DATA]		= 0x9004, +	[EXT_CFG_DATA]		= 0x8000,  	[PCIE_HARD_DEBUG]	= 0x4204,  	[PCIE_INTR2_CPU_BASE]	= 0x4300,  }; @@ -1682,8 +1733,9 @@ static const int pcie_offsets_bcm7425[] = {  };  static const int pcie_offsets_bcm7712[] = { +	[RGR1_SW_INIT_1]	= 0x9210,  	[EXT_CFG_INDEX]		= 0x9000, -	[EXT_CFG_DATA]		= 0x9004, +	[EXT_CFG_DATA]		= 0x8000,  	[PCIE_HARD_DEBUG]	= 0x4304,  	[PCIE_INTR2_CPU_BASE]	= 0x4400,  }; @@ -1704,6 +1756,16 @@ static 
const struct pcie_cfg_data bcm2711_cfg = {  	.num_inbound_wins = 3,  }; +static const struct pcie_cfg_data bcm2712_cfg = { +	.offsets	= pcie_offsets_bcm7712, +	.soc_base	= BCM7712, +	.perst_set	= brcm_pcie_perst_set_7278, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +	.post_setup	= brcm_pcie_post_setup_bcm2712, +	.quirks		= CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN, +	.num_inbound_wins = 10, +}; +  static const struct pcie_cfg_data bcm4908_cfg = {  	.offsets	= pcie_offsets,  	.soc_base	= BCM4908, @@ -1755,6 +1817,7 @@ static const struct pcie_cfg_data bcm7712_cfg = {  static const struct of_device_id brcm_pcie_match[] = {  	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg }, +	{ .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },  	{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },  	{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },  	{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg }, @@ -1784,7 +1847,7 @@ static struct pci_ops brcm7425_pcie_ops = {  static int brcm_pcie_probe(struct platform_device *pdev)  { -	struct device_node *np = pdev->dev.of_node, *msi_np; +	struct device_node *np = pdev->dev.of_node;  	struct pci_host_bridge *bridge;  	const struct pcie_cfg_data *data;  	struct brcm_pcie *pcie; @@ -1803,12 +1866,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)  	pcie = pci_host_bridge_priv(bridge);  	pcie->dev = &pdev->dev;  	pcie->np = np; -	pcie->reg_offsets = data->offsets; -	pcie->soc_base = data->soc_base; -	pcie->perst_set = data->perst_set; -	pcie->bridge_sw_init_set = data->bridge_sw_init_set; -	pcie->has_phy = data->has_phy; -	pcie->num_inbound_wins = data->num_inbound_wins; +	pcie->cfg = data;  	pcie->base = devm_platform_ioremap_resource(pdev, 0);  	if (IS_ERR(pcie->base)) @@ -1843,7 +1901,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)  	if (ret)  		return dev_err_probe(&pdev->dev, ret, "could not enable clock\n"); -	pcie->bridge_sw_init_set(pcie, 0); +	
pcie->cfg->bridge_sw_init_set(pcie, 0);  	if (pcie->swinit_reset) {  		ret = reset_control_assert(pcie->swinit_reset); @@ -1882,22 +1940,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)  		goto fail;  	pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION); -	if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { +	if (pcie->cfg->soc_base == BCM4908 && +	    pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {  		dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");  		ret = -ENODEV;  		goto fail;  	} -	msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); -	if (pci_msi_enabled() && msi_np == pcie->np) { -		ret = brcm_pcie_enable_msi(pcie); +	if (pci_msi_enabled()) { +		struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); + +		if (msi_np == pcie->np) +			ret = brcm_pcie_enable_msi(pcie); + +		of_node_put(msi_np); +  		if (ret) {  			dev_err(pcie->dev, "probe of internal MSI failed");  			goto fail;  		}  	} -	bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops; +	bridge->ops = pcie->cfg->soc_base == BCM7425 ? 
+				&brcm7425_pcie_ops : &brcm_pcie_ops;  	bridge->sysdata = pcie;  	platform_set_drvdata(pdev, pcie); @@ -1940,3 +2005,4 @@ module_platform_driver(brcm_pcie_driver);  MODULE_LICENSE("GPL");  MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");  MODULE_AUTHOR("Broadcom"); +MODULE_SOFTDEP("pre: irq_bcm2712_mip"); diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index aa24ac9aaecc..9d52504acae4 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -15,6 +15,7 @@  #include <linux/irqchip/chained_irq.h>  #include <linux/irqdomain.h>  #include <linux/kernel.h> +#include <linux/mfd/syscon.h>  #include <linux/module.h>  #include <linux/msi.h>  #include <linux/of_device.h> @@ -24,6 +25,7 @@  #include <linux/platform_device.h>  #include <linux/pm_domain.h>  #include <linux/pm_runtime.h> +#include <linux/regmap.h>  #include <linux/reset.h>  #include "../pci.h" @@ -352,7 +354,8 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,  		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",  			range_type, *num, (unsigned long long)cpu_addr, -			(unsigned long long)pci_addr, (unsigned long long)table_size); +			(unsigned long long)pci_addr, +			(unsigned long long)table_size);  		cpu_addr += table_size;  		pci_addr += table_size; @@ -887,7 +890,8 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)  	for (i = 0; i < num_resets; i++)  		pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i]; -	ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets); +	ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, +							  pcie->phy_resets);  	if (ret) {  		dev_err(dev, "failed to get PHY bulk reset\n");  		return ret; @@ -917,22 +921,27 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)  		return pcie->num_clks;  	} -       ret = of_property_read_u32(dev->of_node, 
"num-lanes", &num_lanes); -       if (ret == 0) { -	       if (num_lanes == 0 || num_lanes > 16 || (num_lanes != 1 && num_lanes % 2)) +	ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes); +	if (ret == 0) { +		if (num_lanes == 0 || num_lanes > 16 || +		    (num_lanes != 1 && num_lanes % 2))  			dev_warn(dev, "invalid num-lanes, using controller defaults\n"); -	       else +		else  			pcie->num_lanes = num_lanes; -       } +	}  	return 0;  }  static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)  { +	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);  	struct device *dev = pcie->dev; +	struct resource_entry *entry; +	struct regmap *pbus_regmap; +	u32 val, args[2], size; +	resource_size_t addr;  	int err; -	u32 val;  	/*  	 * The controller may have been left out of reset by the bootloader @@ -940,12 +949,31 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)  	 */  	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,  				  pcie->phy_resets); -	reset_control_assert(pcie->mac_reset);  	/* Wait for the time needed to complete the reset lines assert. */  	msleep(PCIE_EN7581_RESET_TIME_MS);  	/* +	 * Configure PBus base address and base address mask to allow the +	 * hw to detect if a given address is accessible on PCIe controller. +	 */ +	pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node, +							   "mediatek,pbus-csr", +							   ARRAY_SIZE(args), +							   args); +	if (IS_ERR(pbus_regmap)) +		return PTR_ERR(pbus_regmap); + +	entry = resource_list_first_type(&host->windows, IORESOURCE_MEM); +	if (!entry) +		return -ENODEV; + +	addr = entry->res->start - entry->offset; +	regmap_write(pbus_regmap, args[0], lower_32_bits(addr)); +	size = lower_32_bits(resource_size(entry->res)); +	regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size))); + +	/*  	 * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581  	 * requires PHY initialization and power-on before PHY reset deassert.  	 
*/ @@ -961,7 +989,8 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)  		goto err_phy_on;  	} -	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); +	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, +					  pcie->phy_resets);  	if (err) {  		dev_err(dev, "failed to deassert PHYs\n");  		goto err_phy_deassert; @@ -1006,7 +1035,8 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)  err_clk_prepare_enable:  	pm_runtime_put_sync(dev);  	pm_runtime_disable(dev); -	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); +	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, +				  pcie->phy_resets);  err_phy_deassert:  	phy_power_off(pcie->phy);  err_phy_on: @@ -1030,7 +1060,8 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)  	usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);  	/* PHY power on and enable pipe clock */ -	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); +	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, +					  pcie->phy_resets);  	if (err) {  		dev_err(dev, "failed to deassert PHYs\n");  		return err; @@ -1070,7 +1101,8 @@ err_clk_init:  err_phy_on:  	phy_exit(pcie->phy);  err_phy_init: -	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); +	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, +				  pcie->phy_resets);  	return err;  } @@ -1085,7 +1117,8 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)  	phy_power_off(pcie->phy);  	phy_exit(pcie->phy); -	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); +	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, +				  pcie->phy_resets);  }  static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie) @@ -1112,7 +1145,8 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)  	 * Deassert the line in order to 
avoid unbalance in deassert_count  	 * counter since the bulk is shared.  	 */ -	reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); +	reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, +				    pcie->phy_resets);  	/* Don't touch the hardware registers before power up */  	err = pcie->soc->power_up(pcie); diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 3bcfc4e58ba2..811a8b4acd50 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1041,24 +1041,22 @@ err_free_ck:  static int mtk_pcie_setup(struct mtk_pcie *pcie)  {  	struct device *dev = pcie->dev; -	struct device_node *node = dev->of_node, *child; +	struct device_node *node = dev->of_node;  	struct mtk_pcie_port *port, *tmp;  	int err, slot;  	slot = of_get_pci_domain_nr(dev->of_node);  	if (slot < 0) { -		for_each_available_child_of_node(node, child) { +		for_each_available_child_of_node_scoped(node, child) {  			err = of_pci_get_devfn(child); -			if (err < 0) { -				dev_err(dev, "failed to get devfn: %d\n", err); -				goto error_put_node; -			} +			if (err < 0) +				return dev_err_probe(dev, err, "failed to get devfn\n");  			slot = PCI_SLOT(err);  			err = mtk_pcie_parse_port(pcie, child, slot);  			if (err) -				goto error_put_node; +				return err;  		}  	} else {  		err = mtk_pcie_parse_port(pcie, node, slot); @@ -1079,9 +1077,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)  		mtk_pcie_subsys_powerdown(pcie);  	return 0; -error_put_node: -	of_node_put(child); -	return err;  }  static int mtk_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c index 776caa0b1011..01ead2f92e87 100644 --- a/drivers/pci/controller/pcie-mt7621.c +++ b/drivers/pci/controller/pcie-mt7621.c @@ -258,30 +258,25 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)  {  	struct device *dev = pcie->dev;  	
struct platform_device *pdev = to_platform_device(dev); -	struct device_node *node = dev->of_node, *child; +	struct device_node *node = dev->of_node;  	int err;  	pcie->base = devm_platform_ioremap_resource(pdev, 0);  	if (IS_ERR(pcie->base))  		return PTR_ERR(pcie->base); -	for_each_available_child_of_node(node, child) { +	for_each_available_child_of_node_scoped(node, child) {  		int slot;  		err = of_pci_get_devfn(child); -		if (err < 0) { -			of_node_put(child); -			dev_err(dev, "failed to parse devfn: %d\n", err); -			return err; -		} +		if (err < 0) +			return dev_err_probe(dev, err, "failed to parse devfn\n");  		slot = PCI_SLOT(err);  		err = mt7621_pcie_parse_port(pcie, child, slot); -		if (err) { -			of_node_put(child); +		if (err)  			return err; -		}  	}  	return 0; diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 7c92eada04af..c32b803a47c7 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -178,8 +178,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,  	 * space, it's generally only accessible when in endpoint mode.  	 * When in root complex mode, the controller is unable to target  	 * itself with either type 0 or type 1 accesses, and indeed, any -	 * controller initiated target transfer to its own config space -	 * result in a completer abort. +	 * controller-initiated target transfer to its own config space +	 * results in a completer abort.  	 
*  	 * Each channel effectively only supports a single device, but as  	 * the same channel <-> device access works for any PCI_SLOT() @@ -775,7 +775,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)  	if (err)  		return err; -	/* Two irqs are for MSI, but they are also used for non-MSI irqs */ +	/* Two IRQs are for MSI, but they are also used for non-MSI IRQs */  	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,  			       IRQF_SHARED | IRQF_NO_THREAD,  			       rcar_msi_bottom_chip.name, host); @@ -792,7 +792,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)  		goto err;  	} -	/* disable all MSIs */ +	/* Disable all MSIs */  	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);  	/* @@ -892,6 +892,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,  			dev_err(pcie->dev, "Failed to map inbound regions!\n");  			return -EINVAL;  		} +  		/*  		 * If the size of the range is larger than the alignment of  		 * the start address, we have to use multiple entries to @@ -903,6 +904,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,  			size = min(size, alignment);  		} +  		/* Hardware supports max 4GiB inbound region */  		size = min(size, 1ULL << 32); diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 5adac6adc046..6a46be17aa91 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -367,7 +367,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)  		}  	} -	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, +	rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP,  			    PCIE_CORE_CONFIG_VENDOR);  	rockchip_pcie_write(rockchip,  			    PCI_CLASS_BRIDGE_PCI_NORMAL << 8, diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 11def598534b..14954f43e5e9 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ 
b/drivers/pci/controller/pcie-rockchip.h @@ -200,7 +200,6 @@  #define AXI_WRAPPER_NOR_MSG			0xc  #define PCIE_RC_SEND_PME_OFF			0x11960 -#define ROCKCHIP_VENDOR_ID			0x1d87  #define PCIE_LINK_IS_L2(x) \  	(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)  #define PCIE_LINK_TRAINING_DONE(x) \ diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index 81e8bfae53d0..13ca493d22bd 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -84,6 +84,7 @@ enum xilinx_cpm_version {  	CPM,  	CPM5,  	CPM5_HOST1, +	CPM5NC_HOST,  };  /** @@ -478,6 +479,9 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)  {  	const struct xilinx_cpm_variant *variant = port->variant; +	if (variant->version == CPM5NC_HOST) +		return; +  	if (cpm_pcie_link_up(port))  		dev_info(port->dev, "PCIe Link is UP\n");  	else @@ -538,7 +542,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,  	if (IS_ERR(port->cfg))  		return PTR_ERR(port->cfg); -	if (port->variant->version == CPM5) { +	if (port->variant->version == CPM5 || +	    port->variant->version == CPM5_HOST1) {  		port->reg_base = devm_platform_ioremap_resource_byname(pdev,  								    "cpm_csr");  		if (IS_ERR(port->reg_base)) @@ -578,28 +583,34 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)  	port->dev = dev; -	err = xilinx_cpm_pcie_init_irq_domain(port); -	if (err) -		return err; +	port->variant = of_device_get_match_data(dev); -	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); -	if (!bus) -		return -ENODEV; +	if (port->variant->version != CPM5NC_HOST) { +		err = xilinx_cpm_pcie_init_irq_domain(port); +		if (err) +			return err; +	} -	port->variant = of_device_get_match_data(dev); +	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); +	if (!bus) { +		err = -ENODEV; +		goto err_free_irq_domains; +	}  	err = xilinx_cpm_pcie_parse_dt(port, bus->res);  	
if (err) {  		dev_err(dev, "Parsing DT failed\n"); -		goto err_parse_dt; +		goto err_free_irq_domains;  	}  	xilinx_cpm_pcie_init_port(port); -	err = xilinx_cpm_setup_irq(port); -	if (err) { -		dev_err(dev, "Failed to set up interrupts\n"); -		goto err_setup_irq; +	if (port->variant->version != CPM5NC_HOST) { +		err = xilinx_cpm_setup_irq(port); +		if (err) { +			dev_err(dev, "Failed to set up interrupts\n"); +			goto err_setup_irq; +		}  	}  	bridge->sysdata = port->cfg; @@ -612,11 +623,13 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)  	return 0;  err_host_bridge: -	xilinx_cpm_free_interrupts(port); +	if (port->variant->version != CPM5NC_HOST) +		xilinx_cpm_free_interrupts(port);  err_setup_irq:  	pci_ecam_free(port->cfg); -err_parse_dt: -	xilinx_cpm_free_irq_domains(port); +err_free_irq_domains: +	if (port->variant->version != CPM5NC_HOST) +		xilinx_cpm_free_irq_domains(port);  	return err;  } @@ -639,6 +652,10 @@ static const struct xilinx_cpm_variant cpm5_host1 = {  	.ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,  }; +static const struct xilinx_cpm_variant cpm5n_host = { +	.version = CPM5NC_HOST, +}; +  static const struct of_device_id xilinx_cpm_pcie_of_match[] = {  	{  		.compatible = "xlnx,versal-cpm-host-1.00", @@ -652,6 +669,10 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {  		.compatible = "xlnx,versal-cpm5-host1",  		.data = &cpm5_host1,  	}, +	{ +		.compatible = "xlnx,versal-cpm5nc-host", +		.data = &cpm5n_host, +	},  	{}  }; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 9d9596947350..8df064b62a2f 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -17,6 +17,8 @@  #include <linux/rculist.h>  #include <linux/rcupdate.h> +#include <xen/xen.h> +  #include <asm/irqdomain.h>  #define VMD_CFGBAR	0 @@ -125,7 +127,7 @@ struct vmd_irq_list {  struct vmd_dev {  	struct pci_dev		*dev; -	spinlock_t		cfg_lock; +	raw_spinlock_t		cfg_lock;  	void __iomem		*cfgbar;  	int 
msix_count; @@ -391,7 +393,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,  	if (!addr)  		return -EFAULT; -	spin_lock_irqsave(&vmd->cfg_lock, flags); +	raw_spin_lock_irqsave(&vmd->cfg_lock, flags);  	switch (len) {  	case 1:  		*value = readb(addr); @@ -406,7 +408,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,  		ret = -EINVAL;  		break;  	} -	spin_unlock_irqrestore(&vmd->cfg_lock, flags); +	raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);  	return ret;  } @@ -426,7 +428,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,  	if (!addr)  		return -EFAULT; -	spin_lock_irqsave(&vmd->cfg_lock, flags); +	raw_spin_lock_irqsave(&vmd->cfg_lock, flags);  	switch (len) {  	case 1:  		writeb(value, addr); @@ -444,7 +446,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,  		ret = -EINVAL;  		break;  	} -	spin_unlock_irqrestore(&vmd->cfg_lock, flags); +	raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);  	return ret;  } @@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)  	struct vmd_dev *vmd;  	int err; +	if (xen_domain()) { +		/* +		 * Xen doesn't have knowledge about devices in the VMD bus +		 * because the config space of devices behind the VMD bridge is +		 * not known to Xen, and hence Xen cannot discover or configure +		 * them in any way. +		 * +		 * Bypass of MSI remapping won't work in that case as direct +		 * write by Linux to the MSI entries won't result in functional +		 * interrupts, as Xen is the entity that manages the host +		 * interrupt controller and must configure interrupts.  However +		 * multiplexing of interrupts by the VMD bridge will work under +		 * Xen, so force the usage of that mode which must always be +		 * supported by VMD bridges. 
+		 */ +		features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP; +	} +  	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))  		return -ENOMEM; @@ -1009,7 +1029,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)  	if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)  		vmd->first_vec = 1; -	spin_lock_init(&vmd->cfg_lock); +	raw_spin_lock_init(&vmd->cfg_lock);  	pci_set_drvdata(dev, vmd);  	err = vmd_enable_domain(vmd, features);  	if (err) | 
