Diffstat (limited to 'drivers/net/dsa')
27 files changed, 2963 insertions, 437 deletions
| diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index a5f1aa911fe2..7b1457a6e327 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -70,6 +70,7 @@ config NET_DSA_QCA8K  config NET_DSA_REALTEK_SMI  	tristate "Realtek SMI Ethernet switch family support"  	select NET_DSA_TAG_RTL4_A +	select NET_DSA_TAG_RTL8_4  	select FIXED_PHY  	select IRQ_DOMAIN  	select REALTEK_PHY diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index f3598c040994..8da1569a34e6 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_NET_DSA_MT7530)	+= mt7530.o  obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o  obj-$(CONFIG_NET_DSA_QCA8K)	+= qca8k.o  obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o -realtek-smi-objs		:= realtek-smi-core.o rtl8366.o rtl8366rb.o +realtek-smi-objs		:= realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o  obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o  obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o  obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 604f54112665..af4761968733 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1222,7 +1222,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,  		return;  	/* Enable flow control on BCM5301x's CPU port */ -	if (is5301x(dev) && port == dev->cpu_port) +	if (is5301x(dev) && dsa_is_cpu_port(ds, port))  		tx_pause = rx_pause = true;  	if (phydev->pause) { @@ -1291,12 +1291,6 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,  				return;  			}  		} -	} else if (is5301x(dev)) { -		if (port != dev->cpu_port) { -			b53_force_port_config(dev, dev->cpu_port, 2000, -					      DUPLEX_FULL, true, true); -			b53_force_link(dev, dev->cpu_port, 1); -		}  	}  	/* Re-negotiate EEE if it was enabled already */ @@ -1349,10 +1343,8 @@ void b53_phylink_validate(struct dsa_switch *ds, int port,  		phylink_set(mask, 100baseT_Full);  	} -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  	phylink_helper_basex_speed(state);  } @@ -1550,7 +1542,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,  }  EXPORT_SYMBOL(b53_vlan_del); -/* Address Resolution Logic routines */ +/* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. 
*/  static int b53_arl_op_wait(struct b53_device *dev)  {  	unsigned int timeout = 10; @@ -1715,6 +1707,7 @@ int b53_fdb_add(struct dsa_switch *ds, int port,  		const unsigned char *addr, u16 vid)  {  	struct b53_device *priv = ds->priv; +	int ret;  	/* 5325 and 5365 require some more massaging, but could  	 * be supported eventually @@ -1722,7 +1715,11 @@ int b53_fdb_add(struct dsa_switch *ds, int port,  	if (is5325(priv) || is5365(priv))  		return -EOPNOTSUPP; -	return b53_arl_op(priv, 0, port, addr, vid, true); +	mutex_lock(&priv->arl_mutex); +	ret = b53_arl_op(priv, 0, port, addr, vid, true); +	mutex_unlock(&priv->arl_mutex); + +	return ret;  }  EXPORT_SYMBOL(b53_fdb_add); @@ -1730,8 +1727,13 @@ int b53_fdb_del(struct dsa_switch *ds, int port,  		const unsigned char *addr, u16 vid)  {  	struct b53_device *priv = ds->priv; +	int ret; + +	mutex_lock(&priv->arl_mutex); +	ret = b53_arl_op(priv, 0, port, addr, vid, false); +	mutex_unlock(&priv->arl_mutex); -	return b53_arl_op(priv, 0, port, addr, vid, false); +	return ret;  }  EXPORT_SYMBOL(b53_fdb_del); @@ -1788,6 +1790,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,  	int ret;  	u8 reg; +	mutex_lock(&priv->arl_mutex); +  	/* Start search operation */  	reg = ARL_SRCH_STDN;  	b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); @@ -1795,18 +1799,18 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,  	do {  		ret = b53_arl_search_wait(priv);  		if (ret) -			return ret; +			break;  		b53_arl_search_rd(priv, 0, &results[0]);  		ret = b53_fdb_copy(port, &results[0], cb, data);  		if (ret) -			return ret; +			break;  		if (priv->num_arl_bins > 2) {  			b53_arl_search_rd(priv, 1, &results[1]);  			ret = b53_fdb_copy(port, &results[1], cb, data);  			if (ret) -				return ret; +				break;  			if (!results[0].is_valid && !results[1].is_valid)  				break; @@ -1814,6 +1818,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,  	} while (count++ < b53_max_arl_entries(priv) / 2); +	mutex_unlock(&priv->arl_mutex); +  	return 0;  }  EXPORT_SYMBOL(b53_fdb_dump); @@ -1822,6 +1828,7 @@ int b53_mdb_add(struct dsa_switch *ds, int port,  		const struct switchdev_obj_port_mdb *mdb)  {  	struct b53_device *priv = ds->priv; +	int ret;  	/* 5325 and 5365 require some more massaging, but could  	 * be supported eventually @@ -1829,7 +1836,11 @@ int b53_mdb_add(struct dsa_switch *ds, int port,  	if (is5325(priv) || is5365(priv))  		return -EOPNOTSUPP; -	return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); +	mutex_lock(&priv->arl_mutex); +	ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); +	mutex_unlock(&priv->arl_mutex); + +	return ret;  }  EXPORT_SYMBOL(b53_mdb_add); @@ -1839,7 +1850,9 @@ int b53_mdb_del(struct dsa_switch *ds, int port,  	struct b53_device *priv = ds->priv;  	int ret; +	mutex_lock(&priv->arl_mutex);  	ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); +	mutex_unlock(&priv->arl_mutex);  	if (ret)  		dev_err(ds->dev, "failed to delete MDB entry\n"); @@ -2302,33 +2315,30 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM5325_DEVICE_ID,  		.dev_name = "BCM5325",  		.vlans = 16, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x3f,  		.arl_bins = 2,  		.arl_buckets = 1024,  		.imp_port = 5, -		.cpu_port = B53_CPU_PORT_25,  		.duplex_reg = B53_DUPLEX_STAT_FE,  	},  	{  		.chip_id = BCM5365_DEVICE_ID,  		.dev_name = "BCM5365",  		.vlans = 256, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x3f,  		.arl_bins = 2,  		.arl_buckets = 1024,  		.imp_port = 5, -		.cpu_port = B53_CPU_PORT_25,  		
.duplex_reg = B53_DUPLEX_STAT_FE,  	},  	{  		.chip_id = BCM5389_DEVICE_ID,  		.dev_name = "BCM5389",  		.vlans = 4096, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x11f,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2338,11 +2348,10 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM5395_DEVICE_ID,  		.dev_name = "BCM5395",  		.vlans = 4096, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x11f,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2352,11 +2361,10 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM5397_DEVICE_ID,  		.dev_name = "BCM5397",  		.vlans = 4096, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x11f,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS_9798,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2366,11 +2374,10 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM5398_DEVICE_ID,  		.dev_name = "BCM5398",  		.vlans = 4096, -		.enabled_ports = 0x7f, +		.enabled_ports = 0x17f,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS_9798,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2380,12 +2387,11 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM53115_DEVICE_ID,  		.dev_name = "BCM53115",  		.vlans = 4096, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x11f,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.vta_regs = B53_VTA_REGS,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,  		.jumbo_size_reg = B53_JUMBO_MAX_SIZE, @@ -2394,11 +2400,10 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM53125_DEVICE_ID,  		.dev_name = "BCM53125",  		.vlans = 4096, -		.enabled_ports = 0xff, +		.enabled_ports = 0x1ff,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2412,7 +2417,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2426,7 +2430,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS_63XX,  		.duplex_reg = B53_DUPLEX_STAT_63XX,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, @@ -2436,11 +2439,10 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM53010_DEVICE_ID,  		.dev_name = "BCM53010",  		.vlans = 4096, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x1bf,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2454,7 +2456,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		
.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2468,7 +2469,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2478,11 +2478,10 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM53018_DEVICE_ID,  		.dev_name = "BCM53018",  		.vlans = 4096, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x1bf,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2492,11 +2491,10 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.chip_id = BCM53019_DEVICE_ID,  		.dev_name = "BCM53019",  		.vlans = 4096, -		.enabled_ports = 0x1f, +		.enabled_ports = 0x1bf,  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2510,7 +2508,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2524,7 +2521,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2539,7 +2535,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		.arl_buckets = 256,  		.imp_port = 8, -		.cpu_port = 8, /* TODO: ports 4, 5, 8 */  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2553,7 +2548,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		.arl_buckets = 1024,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2567,7 +2561,6 @@ static const struct b53_chip_data b53_switch_chips[] = {  		.arl_bins = 4,  		.arl_buckets = 256,  		.imp_port = 8, -		.cpu_port = B53_CPU_PORT,  		.vta_regs = B53_VTA_REGS,  		.duplex_reg = B53_DUPLEX_STAT_GE,  		.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2593,7 +2586,6 @@ static int b53_switch_init(struct b53_device *dev)  			dev->vta_regs[2] = chip->vta_regs[2];  			dev->jumbo_pm_reg = chip->jumbo_pm_reg;  			dev->imp_port = chip->imp_port; -			dev->cpu_port = chip->cpu_port;  			dev->num_vlans = chip->vlans;  			dev->num_arl_bins = chip->arl_bins;  			dev->num_arl_buckets = chip->arl_buckets; @@ -2625,16 +2617,8 @@ static int b53_switch_init(struct b53_device *dev)  			break;  #endif  		} -	} else if (dev->chip_id == BCM53115_DEVICE_ID) { -		u64 strap_value; - -		b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value); -		/* use second IMP port if GMII is enabled */ -		if (strap_value & SV_GMII_CTRL_115) -			dev->cpu_port = 5;  	} -	dev->enabled_ports |= BIT(dev->cpu_port);  	dev->num_ports = fls(dev->enabled_ports);  	
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); @@ -2705,6 +2689,7 @@ struct b53_device *b53_switch_alloc(struct device *base,  	mutex_init(&dev->reg_mutex);  	mutex_init(&dev->stats_mutex); +	mutex_init(&dev->arl_mutex);  	return dev;  } diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 959a52d41f0a..579da74ada64 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -107,6 +107,7 @@ struct b53_device {  	struct mutex reg_mutex;  	struct mutex stats_mutex; +	struct mutex arl_mutex;  	const struct b53_io_ops *ops;  	/* chip specific data */ @@ -124,7 +125,6 @@ struct b53_device {  	/* used ports mask */  	u16 enabled_ports;  	unsigned int imp_port; -	unsigned int cpu_port;  	/* connect specific data */  	u8 current_page; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 7578a5c38df5..13aa43b5cffd 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -667,7 +667,9 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)  	if (priv->int_phy_mask & BIT(port))  		return priv->hw_params.gphy_rev;  	else -		return 0; +		return PHY_BRCM_AUTO_PWRDWN_ENABLE | +		       PHY_BRCM_DIS_TXCRXC_NOENRGY | +		       PHY_BRCM_IDDQ_SUSPEND;  }  static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, @@ -683,7 +685,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,  	    state->interface != PHY_INTERFACE_MODE_GMII &&  	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&  	    state->interface != PHY_INTERFACE_MODE_MOCA) { -		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +		linkmode_zero(supported);  		if (port != core_readl(priv, CORE_IMP0_PRT_ID))  			dev_err(ds->dev,  				"Unsupported interface: %d for port %d\n", @@ -711,10 +713,8 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,  	phylink_set(mask, 100baseT_Half);  	phylink_set(mask, 100baseT_Full); -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  }  static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 354655f9ed00..4e0b53d94b52 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1403,10 +1403,8 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,  	else  		phylink_set(mask, 1000baseT_Full); -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  }  static int diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index dbd4486a173f..7056d98d8177 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -276,6 +276,7 @@ struct gswip_priv {  	int num_gphy_fw;  	struct gswip_gphy_fw *gphy_fw;  	u32 port_vlan_filter; +	struct mutex pce_table_lock;  };  struct gswip_pce_table_entry { @@ -523,10 +524,14 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,  	u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :  					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD; +	mutex_lock(&priv->pce_table_lock); +  	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,  				     GSWIP_PCE_TBL_CTRL_BAS); -	if (err) +	if (err) { +		mutex_unlock(&priv->pce_table_lock);  		return err; +	}  	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);  	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | @@ -536,8 +541,10 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,  	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,  				     GSWIP_PCE_TBL_CTRL_BAS); -	if (err) +	if (err) { +		mutex_unlock(&priv->pce_table_lock);  		return err; +	}  	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)  		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i)); @@ -553,6 +560,8 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,  	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);  	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7; +	mutex_unlock(&priv->pce_table_lock); +  	return 0;  } @@ -565,10 +574,14 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,  	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :  					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR; +	mutex_lock(&priv->pce_table_lock); +  	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,  				     GSWIP_PCE_TBL_CTRL_BAS); -	if (err) +	if (err) { +		mutex_unlock(&priv->pce_table_lock);  		return err; +	}  	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);  	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | @@ -600,8 +613,12 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,  	crtl |= GSWIP_PCE_TBL_CTRL_BAS;  	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL); -	return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, -				      GSWIP_PCE_TBL_CTRL_BAS); +	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, +				     GSWIP_PCE_TBL_CTRL_BAS); + +	mutex_unlock(&priv->pce_table_lock); + +	return err;  }  /* Add the LAN port into a bridge with the CPU port by @@ -1447,10 +1464,8 @@ static void gswip_phylink_set_capab(unsigned long *supported,  	phylink_set(mask, 100baseT_Half);  	phylink_set(mask, 100baseT_Full); -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  }  static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, @@ -1478,7 +1493,7 @@ static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,  			goto unsupported;  		break;  	default: -		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +		linkmode_zero(supported);  		dev_err(ds->dev, "Unsupported port: %i\n", port);  		return;  	} @@ -1488,7 +1503,7 @@ static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,  	return;  unsupported: -	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_zero(supported);  	dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",  		phy_modes(state->interface), port);  } @@ -1518,7 +1533,7 @@ static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,  			goto unsupported;  		break;  	default: -		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +		linkmode_zero(supported);  		dev_err(ds->dev, "Unsupported port: %i\n", port);  		return;  	} @@ -1528,7 +1543,7 @@ static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,  	return;  unsupported: -	
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_zero(supported);  	dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",  		phy_modes(state->interface), port);  } @@ -2106,6 +2121,7 @@ static int gswip_probe(struct platform_device *pdev)  	priv->ds->priv = priv;  	priv->ds->ops = priv->hw_info->ops;  	priv->dev = dev; +	mutex_init(&priv->pce_table_lock);  	version = gswip_switch_r(priv, GSWIP_VERSION);  	np = dev->of_node; diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index c5142f86a3c7..43fc3087aeb3 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1542,15 +1542,13 @@ static void ksz8_validate(struct dsa_switch *ds, int port,  	phylink_set(mask, 100baseT_Half);  	phylink_set(mask, 100baseT_Full); -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  	return;  unsupported: -	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_zero(supported);  	dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",  		phy_modes(state->interface), port);  } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8dadcae93c9b..14c678a9e41b 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -674,9 +674,8 @@ static void mv88e6xxx_validate(struct dsa_switch *ds, int port,  	if (chip->info->ops->phylink_validate)  		chip->info->ops->phylink_validate(chip, port, mask, state); -	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  	/* We can only operate at 2500BaseX or 1000BaseX.  If requested  	 * to advertise both, only report advertising at 2500BaseX. 
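The recurring change in b53, bcm_sf2, hellcreek, gswip, ksz8795 and mv88e6xxx above is purely mechanical: open-coded bitmap_zero()/bitmap_and() calls on ethtool link-mode masks are replaced with the linkmode_zero()/linkmode_and() helpers from <linux/linkmode.h>, which already carry the mask width (__ETHTOOL_LINK_MODE_MASK_NBITS). A minimal sketch of a phylink validate callback written in that style follows; it is illustrative only and not part of the patch, and the function name and the supported modes are invented for the example.

#include <linux/linkmode.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/dsa.h>

static void example_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Reject unsupported interfaces by clearing the whole mask */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_RGMII) {
		linkmode_zero(supported);
		return;
	}

	/* Build the set of modes this (hypothetical) port can do */
	phylink_set(mask, Autoneg);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 1000baseT_Full);

	/* No explicit __ETHTOOL_LINK_MODE_MASK_NBITS argument needed */
	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);
}
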
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 341236dcbdb4..83808e7dbdda 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -958,8 +958,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)  	switch_node = dev->of_node;  	ports_node = of_get_child_by_name(switch_node, "ports"); +	if (!ports_node) +		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");  	if (!ports_node) { -		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); +		dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");  		return -ENODEV;  	} diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 11b42fd812e4..45c5ec7a83ea 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -943,7 +943,7 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,  	if (state->interface != PHY_INTERFACE_MODE_NA &&  	    state->interface != ocelot_port->phy_mode) { -		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +		linkmode_zero(supported);  		return;  	} @@ -965,10 +965,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,  		phylink_set(mask, 2500baseX_Full);  	} -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  }  static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index de1d34a1f1e4..92eae63150ea 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -999,7 +999,7 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,  	if (state->interface != PHY_INTERFACE_MODE_NA &&  	    state->interface != ocelot_port->phy_mode) { -		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +		linkmode_zero(supported);  		return;  	} @@ -1018,10 +1018,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,  		phylink_set(mask, 2500baseX_Full);  	} -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  }  static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index a6bfb6abc51a..da0d7e68643a 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -522,7 +522,7 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,  			goto unsupported;  		break;  	default: -		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +		linkmode_zero(supported);  		dev_err(ds->dev, "Unsupported port: %i\n", port);  		return;  	} @@ -536,15 +536,13 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,  	phylink_set(mask, 100baseT_Half);  	phylink_set(mask, 100baseT_Full); -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, 
mask); +	linkmode_and(state->advertising, state->advertising, mask);  	return;  unsupported: -	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_zero(supported);  	dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",  		state->interface, port);  } diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a984f06f6f04..ea7f12778922 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -889,62 +889,183 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)  }  static int -qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv) +qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)  { -	struct device_node *port_dn; -	phy_interface_t mode; -	struct dsa_port *dp; -	u32 val; +	u32 mask = 0; +	int ret = 0; -	/* CPU port is already checked */ -	dp = dsa_to_port(priv->ds, 0); +	/* SoC specific settings for ipq8064. +	 * If more device require this consider adding +	 * a dedicated binding. +	 */ +	if (of_machine_is_compatible("qcom,ipq8064")) +		mask |= QCA8K_MAC_PWR_RGMII0_1_8V; + +	/* SoC specific settings for ipq8065 */ +	if (of_machine_is_compatible("qcom,ipq8065")) +		mask |= QCA8K_MAC_PWR_RGMII1_1_8V; + +	if (mask) { +		ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL, +				QCA8K_MAC_PWR_RGMII0_1_8V | +				QCA8K_MAC_PWR_RGMII1_1_8V, +				mask); +	} + +	return ret; +} -	port_dn = dp->dn; +static int qca8k_find_cpu_port(struct dsa_switch *ds) +{ +	struct qca8k_priv *priv = ds->priv; -	/* Check if port 0 is set to the correct type */ -	of_get_phy_mode(port_dn, &mode); -	if (mode != PHY_INTERFACE_MODE_RGMII_ID && -	    mode != PHY_INTERFACE_MODE_RGMII_RXID && -	    mode != PHY_INTERFACE_MODE_RGMII_TXID) { +	/* Find the connected cpu port. Valid port are 0 or 6 */ +	if (dsa_is_cpu_port(ds, 0))  		return 0; + +	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6"); + +	if (dsa_is_cpu_port(ds, 6)) +		return 6; + +	return -EINVAL; +} + +static int +qca8k_setup_of_pws_reg(struct qca8k_priv *priv) +{ +	struct device_node *node = priv->dev->of_node; +	const struct qca8k_match_data *data; +	u32 val = 0; +	int ret; + +	/* QCA8327 require to set to the correct mode. +	 * His bigger brother QCA8328 have the 172 pin layout. +	 * Should be applied by default but we set this just to make sure. 
+	 */ +	if (priv->switch_id == QCA8K_ID_QCA8327) { +		data = of_device_get_match_data(priv->dev); + +		/* Set the correct package of 148 pin for QCA8327 */ +		if (data->reduced_package) +			val |= QCA8327_PWS_PACKAGE148_EN; + +		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, +				val); +		if (ret) +			return ret;  	} -	switch (mode) { -	case PHY_INTERFACE_MODE_RGMII_ID: -	case PHY_INTERFACE_MODE_RGMII_RXID: -		if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val)) -			val = 2; -		else -			/* Switch regs accept value in ns, convert ps to ns */ -			val = val / 1000; +	if (of_property_read_bool(node, "qca,ignore-power-on-sel")) +		val |= QCA8K_PWS_POWER_ON_SEL; -		if (val > QCA8K_MAX_DELAY) { -			dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); -			val = 3; +	if (of_property_read_bool(node, "qca,led-open-drain")) { +		if (!(val & QCA8K_PWS_POWER_ON_SEL)) { +			dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set."); +			return -EINVAL;  		} -		priv->rgmii_rx_delay = val; -		/* Stop here if we need to check only for rx delay */ -		if (mode != PHY_INTERFACE_MODE_RGMII_ID) -			break; +		val |= QCA8K_PWS_LED_OPEN_EN_CSR; +	} -		fallthrough; -	case PHY_INTERFACE_MODE_RGMII_TXID: -		if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val)) -			val = 1; -		else -			/* Switch regs accept value in ns, convert ps to ns */ -			val = val / 1000; +	return qca8k_rmw(priv, QCA8K_REG_PWS, +			QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL, +			val); +} -		if (val > QCA8K_MAX_DELAY) { -			dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); -			val = 3; -		} +static int +qca8k_parse_port_config(struct qca8k_priv *priv) +{ +	int port, cpu_port_index = -1, ret; +	struct device_node *port_dn; +	phy_interface_t mode; +	struct dsa_port *dp; +	u32 delay; -		priv->rgmii_tx_delay = val; -		break; -	default: -		return 0; +	/* We have 2 CPU port. 
Check them */ +	for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) { +		/* Skip every other port */ +		if (port != 0 && port != 6) +			continue; + +		dp = dsa_to_port(priv->ds, port); +		port_dn = dp->dn; +		cpu_port_index++; + +		if (!of_device_is_available(port_dn)) +			continue; + +		ret = of_get_phy_mode(port_dn, &mode); +		if (ret) +			continue; + +		switch (mode) { +		case PHY_INTERFACE_MODE_RGMII: +		case PHY_INTERFACE_MODE_RGMII_ID: +		case PHY_INTERFACE_MODE_RGMII_TXID: +		case PHY_INTERFACE_MODE_RGMII_RXID: +		case PHY_INTERFACE_MODE_SGMII: +			delay = 0; + +			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) +				/* Switch regs accept value in ns, convert ps to ns */ +				delay = delay / 1000; +			else if (mode == PHY_INTERFACE_MODE_RGMII_ID || +				 mode == PHY_INTERFACE_MODE_RGMII_TXID) +				delay = 1; + +			if (delay > QCA8K_MAX_DELAY) { +				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); +				delay = 3; +			} + +			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay; + +			delay = 0; + +			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) +				/* Switch regs accept value in ns, convert ps to ns */ +				delay = delay / 1000; +			else if (mode == PHY_INTERFACE_MODE_RGMII_ID || +				 mode == PHY_INTERFACE_MODE_RGMII_RXID) +				delay = 2; + +			if (delay > QCA8K_MAX_DELAY) { +				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); +				delay = 3; +			} + +			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay; + +			/* Skip sgmii parsing for rgmii* mode */ +			if (mode == PHY_INTERFACE_MODE_RGMII || +			    mode == PHY_INTERFACE_MODE_RGMII_ID || +			    mode == PHY_INTERFACE_MODE_RGMII_TXID || +			    mode == PHY_INTERFACE_MODE_RGMII_RXID) +				break; + +			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) +				priv->ports_config.sgmii_tx_clk_falling_edge = true; + +			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) +				priv->ports_config.sgmii_rx_clk_falling_edge = true; + +			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { +				priv->ports_config.sgmii_enable_pll = true; + +				if (priv->switch_id == QCA8K_ID_QCA8327) { +					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. 
Aborting enabling"); +					priv->ports_config.sgmii_enable_pll = false; +				} + +				if (priv->switch_revision < 2) +					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more."); +			} + +			break; +		default: +			continue; +		}  	}  	return 0; @@ -954,15 +1075,20 @@ static int  qca8k_setup(struct dsa_switch *ds)  {  	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -	int ret, i; +	int cpu_port, ret, i;  	u32 mask; -	/* Make sure that port 0 is the cpu port */ -	if (!dsa_is_cpu_port(ds, 0)) { -		dev_err(priv->dev, "port 0 is not the CPU port"); -		return -EINVAL; +	cpu_port = qca8k_find_cpu_port(ds); +	if (cpu_port < 0) { +		dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); +		return cpu_port;  	} +	/* Parse CPU port config to be later used in phy_link mac_config */ +	ret = qca8k_parse_port_config(priv); +	if (ret) +		return ret; +  	mutex_init(&priv->reg_mutex);  	/* Start by setting up the register mapping */ @@ -975,7 +1101,11 @@ qca8k_setup(struct dsa_switch *ds)  	if (ret)  		return ret; -	ret = qca8k_setup_of_rgmii_delay(priv); +	ret = qca8k_setup_of_pws_reg(priv); +	if (ret) +		return ret; + +	ret = qca8k_setup_mac_pwr_sel(priv);  	if (ret)  		return ret; @@ -992,41 +1122,49 @@ qca8k_setup(struct dsa_switch *ds)  	if (ret)  		dev_warn(priv->dev, "mib init failed"); -	/* Enable QCA header mode on the cpu port */ -	ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT), -			  QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | -			  QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); -	if (ret) { -		dev_err(priv->dev, "failed enabling QCA header mode"); -		return ret; -	} - -	/* Disable forwarding by default on all ports */ +	/* Initial setup of all ports */  	for (i = 0; i < QCA8K_NUM_PORTS; i++) { +		/* Disable forwarding by default on all ports */  		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),  				QCA8K_PORT_LOOKUP_MEMBER, 0);  		if (ret)  			return ret; -	} -	/* Disable MAC by default on all ports */ -	for (i = 1; i < QCA8K_NUM_PORTS; i++) -		qca8k_port_set_status(priv, i, 0); +		/* Enable QCA header mode on all cpu ports */ +		if (dsa_is_cpu_port(ds, i)) { +			ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), +					  QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | +					  QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); +			if (ret) { +				dev_err(priv->dev, "failed enabling QCA header mode"); +				return ret; +			} +		} + +		/* Disable MAC by default on all user ports */ +		if (dsa_is_user_port(ds, i)) +			qca8k_port_set_status(priv, i, 0); +	} -	/* Forward all unknown frames to CPU port for Linux processing */ +	/* Forward all unknown frames to CPU port for Linux processing +	 * Notice that in multi-cpu config only one port should be set +	 * for igmp, unknown, multicast and broadcast packet +	 */  	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, -			  BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | -			  BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | -			  BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | -			  BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); +			  BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | +			  BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | +			  BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | +			  BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);  	if (ret)  		return ret; -	/* Setup connection between CPU port & user ports */ +	/* Setup connection between CPU port & user ports +	 * Configure specific switch configuration for ports +	 */  	for (i = 0; i < 
QCA8K_NUM_PORTS; i++) {  		/* CPU port gets connected to all user ports of the switch */  		if (dsa_is_cpu_port(ds, i)) { -			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT), +			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),  					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));  			if (ret)  				return ret; @@ -1038,7 +1176,7 @@ qca8k_setup(struct dsa_switch *ds)  			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),  					QCA8K_PORT_LOOKUP_MEMBER, -					BIT(QCA8K_CPU_PORT)); +					BIT(cpu_port));  			if (ret)  				return ret; @@ -1063,16 +1201,14 @@ qca8k_setup(struct dsa_switch *ds)  			if (ret)  				return ret;  		} -	} -	/* The port 5 of the qca8337 have some problem in flood condition. The -	 * original legacy driver had some specific buffer and priority settings -	 * for the different port suggested by the QCA switch team. Add this -	 * missing settings to improve switch stability under load condition. -	 * This problem is limited to qca8337 and other qca8k switch are not affected. -	 */ -	if (priv->switch_id == QCA8K_ID_QCA8337) { -		for (i = 0; i < QCA8K_NUM_PORTS; i++) { +		/* The port 5 of the qca8337 have some problem in flood condition. The +		 * original legacy driver had some specific buffer and priority settings +		 * for the different port suggested by the QCA switch team. Add this +		 * missing settings to improve switch stability under load condition. +		 * This problem is limited to qca8337 and other qca8k switch are not affected. +		 */ +		if (priv->switch_id == QCA8K_ID_QCA8337) {  			switch (i) {  			/* The 2 CPU port and port 5 requires some different  			 * priority than any other ports. @@ -1108,6 +1244,12 @@ qca8k_setup(struct dsa_switch *ds)  				  QCA8K_PORT_HOL_CTRL1_WRED_EN,  				  mask);  		} + +		/* Set initial MTU for every port. +		 * We have only have a general MTU setting. So track +		 * every port and set the max across all port. +		 */ +		priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;  	}  	/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ @@ -1121,8 +1263,6 @@ qca8k_setup(struct dsa_switch *ds)  	}  	/* Setup our port MTUs to match power on defaults */ -	for (i = 0; i < QCA8K_NUM_PORTS; i++) -		priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;  	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);  	if (ret)  		dev_warn(priv->dev, "failed setting MTU settings"); @@ -1137,12 +1277,53 @@ qca8k_setup(struct dsa_switch *ds)  }  static void +qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, +				      u32 reg) +{ +	u32 delay, val = 0; +	int ret; + +	/* Delay can be declared in 3 different way. +	 * Mode to rgmii and internal-delay standard binding defined +	 * rgmii-id or rgmii-tx/rx phy mode set. +	 * The parse logic set a delay different than 0 only when one +	 * of the 3 different way is used. In all other case delay is +	 * not enabled. With ID or TX/RXID delay is enabled and set +	 * to the default and recommended value. 
+	 */ +	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) { +		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index]; + +		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | +			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; +	} + +	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) { +		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index]; + +		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | +			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; +	} + +	/* Set RGMII delay based on the selected values */ +	ret = qca8k_rmw(priv, reg, +			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK | +			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK | +			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | +			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN, +			val); +	if (ret) +		dev_err(priv->dev, "Failed to set internal delay for CPU port%d", +			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6); +} + +static void  qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,  			 const struct phylink_link_state *state)  {  	struct qca8k_priv *priv = ds->priv; +	int cpu_port_index, ret;  	u32 reg, val; -	int ret;  	switch (port) {  	case 0: /* 1st CPU port */ @@ -1154,6 +1335,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,  			return;  		reg = QCA8K_REG_PORT0_PAD_CTRL; +		cpu_port_index = QCA8K_CPU_PORT0;  		break;  	case 1:  	case 2: @@ -1172,6 +1354,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,  			return;  		reg = QCA8K_REG_PORT6_PAD_CTRL; +		cpu_port_index = QCA8K_CPU_PORT6;  		break;  	default:  		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); @@ -1186,23 +1369,18 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,  	switch (state->interface) {  	case PHY_INTERFACE_MODE_RGMII: -		/* RGMII mode means no delay so don't enable the delay */ -		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); -		break;  	case PHY_INTERFACE_MODE_RGMII_ID:  	case PHY_INTERFACE_MODE_RGMII_TXID:  	case PHY_INTERFACE_MODE_RGMII_RXID: -		/* RGMII_ID needs internal delay. This is enabled through -		 * PORT5_PAD_CTRL for all ports, rather than individual port -		 * registers +		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); + +		/* Configure rgmii delay */ +		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); + +		/* QCA8337 requires to set rgmii rx delay for all ports. +		 * This is enabled through PORT5_PAD_CTRL for all ports, +		 * rather than individual port registers.  		 
*/ -		qca8k_write(priv, reg, -			    QCA8K_PORT_PAD_RGMII_EN | -			    QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) | -			    QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) | -			    QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | -			    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); -		/* QCA8337 requires to set rgmii rx delay */  		if (priv->switch_id == QCA8K_ID_QCA8337)  			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,  				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); @@ -1227,8 +1405,11 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,  		if (ret)  			return; -		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | -			QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD; +		val |= QCA8K_SGMII_EN_SD; + +		if (priv->ports_config.sgmii_enable_pll) +			val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | +			       QCA8K_SGMII_EN_TX;  		if (dsa_is_cpu_port(ds, port)) {  			/* CPU port, we're talking to the CPU MAC, be a PHY */ @@ -1243,6 +1424,35 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,  		}  		qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); + +		/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and +		 * falling edge is set writing in the PORT0 PAD reg +		 */ +		if (priv->switch_id == QCA8K_ID_QCA8327 || +		    priv->switch_id == QCA8K_ID_QCA8337) +			reg = QCA8K_REG_PORT0_PAD_CTRL; + +		val = 0; + +		/* SGMII Clock phase configuration */ +		if (priv->ports_config.sgmii_rx_clk_falling_edge) +			val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; + +		if (priv->ports_config.sgmii_tx_clk_falling_edge) +			val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; + +		if (val) +			ret = qca8k_rmw(priv, reg, +					QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | +					QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, +					val); + +		/* From original code is reported port instability as SGMII also +		 * require delay set. Apply advised values here or take them from DT. 
+		 */ +		if (state->interface == PHY_INTERFACE_MODE_SGMII) +			qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); +  		break;  	default:  		dev_err(ds->dev, "xMII mode %s not supported for port %d\n", @@ -1522,10 +1732,15 @@ static int  qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)  {  	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -	int port_mask = BIT(QCA8K_CPU_PORT); +	int port_mask, cpu_port;  	int i, ret; -	for (i = 1; i < QCA8K_NUM_PORTS; i++) { +	cpu_port = dsa_to_port(ds, port)->cpu_dp->index; +	port_mask = BIT(cpu_port); + +	for (i = 0; i < QCA8K_NUM_PORTS; i++) { +		if (dsa_is_cpu_port(ds, i)) +			continue;  		if (dsa_to_port(ds, i)->bridge_dev != br)  			continue;  		/* Add this port to the portvlan mask of the other ports @@ -1551,9 +1766,13 @@ static void  qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)  {  	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -	int i; +	int cpu_port, i; -	for (i = 1; i < QCA8K_NUM_PORTS; i++) { +	cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + +	for (i = 0; i < QCA8K_NUM_PORTS; i++) { +		if (dsa_is_cpu_port(ds, i)) +			continue;  		if (dsa_to_port(ds, i)->bridge_dev != br)  			continue;  		/* Remove this port to the portvlan mask of the other ports @@ -1568,7 +1787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)  	 * this port  	 */  	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -		  QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT)); +		  QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));  }  static int @@ -1939,7 +2158,12 @@ static int qca8k_resume(struct device *dev)  static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,  			 qca8k_suspend, qca8k_resume); -static const struct qca8k_match_data qca832x = { +static const struct qca8k_match_data qca8327 = { +	.id = QCA8K_ID_QCA8327, +	.reduced_package = true, +}; + +static const struct qca8k_match_data qca8328 = {  	.id = QCA8K_ID_QCA8327,  }; @@ -1948,7 +2172,8 @@ static const struct qca8k_match_data qca833x = {  };  static const struct of_device_id qca8k_of_match[] = { -	{ .compatible = "qca,qca8327", .data = &qca832x }, +	{ .compatible = "qca,qca8327", .data = &qca8327 }, +	{ .compatible = "qca,qca8328", .data = &qca8328 },  	{ .compatible = "qca,qca8334", .data = &qca833x },  	{ .compatible = "qca,qca8337", .data = &qca833x },  	{ /* sentinel */ }, diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index ed3b05ad6745..e10571a398c9 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -13,6 +13,7 @@  #include <linux/gpio.h>  #define QCA8K_NUM_PORTS					7 +#define QCA8K_NUM_CPU_PORTS				2  #define QCA8K_MAX_MTU					9000  #define PHY_ID_QCA8327					0x004dd034 @@ -24,8 +25,6 @@  #define QCA8K_NUM_FDB_RECORDS				2048 -#define QCA8K_CPU_PORT					0 -  #define QCA8K_PORT_VID_DEF				1  /* Global control registers */ @@ -35,16 +34,26 @@  #define   QCA8K_MASK_CTRL_DEVICE_ID_MASK		GENMASK(15, 8)  #define   QCA8K_MASK_CTRL_DEVICE_ID(x)			((x) >> 8)  #define QCA8K_REG_PORT0_PAD_CTRL			0x004 +#define   QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE	BIT(19) +#define   QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE	BIT(18)  #define QCA8K_REG_PORT5_PAD_CTRL			0x008  #define QCA8K_REG_PORT6_PAD_CTRL			0x00c  #define   QCA8K_PORT_PAD_RGMII_EN			BIT(26) +#define   QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK		GENMASK(23, 22)  #define   QCA8K_PORT_PAD_RGMII_TX_DELAY(x)		((x) << 22) +#define   QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK		GENMASK(21, 20)  #define   QCA8K_PORT_PAD_RGMII_RX_DELAY(x)		((x) << 
20)  #define	  QCA8K_PORT_PAD_RGMII_TX_DELAY_EN		BIT(25)  #define   QCA8K_PORT_PAD_RGMII_RX_DELAY_EN		BIT(24)  #define   QCA8K_MAX_DELAY				3  #define   QCA8K_PORT_PAD_SGMII_EN			BIT(7)  #define QCA8K_REG_PWS					0x010 +#define   QCA8K_PWS_POWER_ON_SEL			BIT(31) +/* This reg is only valid for QCA832x and toggle the package + * type from 176 pin (by default) to 148 pin used on QCA8327 + */ +#define   QCA8327_PWS_PACKAGE148_EN			BIT(30) +#define   QCA8K_PWS_LED_OPEN_EN_CSR			BIT(24)  #define   QCA8K_PWS_SERDES_AEN_DIS			BIT(7)  #define QCA8K_REG_MODULE_EN				0x030  #define   QCA8K_MODULE_EN_MIB				BIT(0) @@ -100,6 +109,11 @@  #define   QCA8K_SGMII_MODE_CTRL_PHY			(1 << 22)  #define   QCA8K_SGMII_MODE_CTRL_MAC			(2 << 22) +/* MAC_PWR_SEL registers */ +#define QCA8K_REG_MAC_PWR_SEL				0x0e4 +#define   QCA8K_MAC_PWR_RGMII1_1_8V			BIT(18) +#define   QCA8K_MAC_PWR_RGMII0_1_8V			BIT(19) +  /* EEE control registers */  #define QCA8K_REG_EEE_CTRL				0x100  #define  QCA8K_REG_EEE_CTRL_LPI_EN(_i)			((_i + 1) * 2) @@ -248,14 +262,27 @@ struct ar8xxx_port_status {  struct qca8k_match_data {  	u8 id; +	bool reduced_package; +}; + +enum { +	QCA8K_CPU_PORT0, +	QCA8K_CPU_PORT6, +}; + +struct qca8k_ports_config { +	bool sgmii_rx_clk_falling_edge; +	bool sgmii_tx_clk_falling_edge; +	bool sgmii_enable_pll; +	u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ +	u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */  };  struct qca8k_priv {  	u8 switch_id;  	u8 switch_revision; -	u8 rgmii_tx_delay; -	u8 rgmii_rx_delay;  	bool legacy_phy_port_mapping; +	struct qca8k_ports_config ports_config;  	struct regmap *regmap;  	struct mii_bus *bus;  	struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c index 2fcfd917b876..c66ebd0ee217 100644 --- a/drivers/net/dsa/realtek-smi-core.c +++ b/drivers/net/dsa/realtek-smi-core.c @@ -501,6 +501,10 @@ static const struct of_device_id realtek_smi_of_match[] = {  		.compatible = "realtek,rtl8366s",  		.data = NULL,  	}, +	{ +		.compatible = "realtek,rtl8365mb", +		.data = &rtl8365mb_variant, +	},  	{ /* sentinel */ },  };  MODULE_DEVICE_TABLE(of, realtek_smi_of_match); diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h index fcf465f7f922..5bfa53e2480a 100644 --- a/drivers/net/dsa/realtek-smi-core.h +++ b/drivers/net/dsa/realtek-smi-core.h @@ -129,9 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,  int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);  int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);  int rtl8366_reset_vlan(struct realtek_smi *smi); -int rtl8366_init_vlan(struct realtek_smi *smi); -int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -			   struct netlink_ext_ack *extack);  int rtl8366_vlan_add(struct dsa_switch *ds, int port,  		     const struct switchdev_obj_port_vlan *vlan,  		     struct netlink_ext_ack *extack); @@ -143,5 +140,6 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);  void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);  extern const struct realtek_smi_variant rtl8366rb_variant; +extern const struct realtek_smi_variant rtl8365mb_variant;  #endif /*  _REALTEK_SMI_H */ diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c new file mode 100644 index 000000000000..baaae97283c5 --- /dev/null +++ b/drivers/net/dsa/rtl8365mb.c @@ -0,0 +1,1982 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch. + * + * Copyright (C) 2021 Alvin Å ipraga <alsi@bang-olufsen.dk> + * Copyright (C) 2021 Michael Rasmussen <mir@bang-olufsen.dk> + * + * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4 + * integrated PHYs for the user facing ports, and an extension interface which + * can be connected to the CPU - or another PHY - via either MII, RMII, or + * RGMII. The switch is configured via the Realtek Simple Management Interface + * (SMI), which uses the MDIO/MDC lines. + * + * Below is a simplified block diagram of the chip and its relevant interfaces. + * + *                          .-----------------------------------. + *                          |                                   | + *         UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC   | + *         UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC   | + *         UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC   | + *         UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC   | + *                          |                                   | + *     CPU/PHY <-MII/RMII/RGMII--->  Extension  <---> Extension | + *                          |       interface 1        GMAC 1   | + *                          |                                   | + *     SMI driver/ <-MDC/SCL---> Management    ~~~~~~~~~~~~~~   | + *        EEPROM   <-MDIO/SDA--> interface     ~REALTEK ~~~~~   | + *                          |                  ~RTL8365MB ~~~   | + *                          |                  ~GXXXC TAIWAN~   | + *        GPIO <--------------> Reset          ~~~~~~~~~~~~~~   | + *                          |                                   | + *      Interrupt  <----------> Link UP/DOWN events             | + *      controller          |                                   | + *                          '-----------------------------------' + * + * The driver uses DSA to integrate the 4 user and 1 extension ports into the + * kernel. Netdevices are created for the user ports, as are PHY devices for + * their integrated PHYs. The device tree firmware should also specify the link + * partner of the extension port - either via a fixed-link or other phy-handle. + * See the device tree bindings for more detailed information. Note that the + * driver has only been tested with a fixed-link, but in principle it should not + * matter. + * + * NOTE: Currently, only the RGMII interface is implemented in this driver. + * + * The interrupt line is asserted on link UP/DOWN events. The driver creates a + * custom irqchip to handle this interrupt and demultiplex the events by reading + * the status registers via SMI. Interrupts are then propagated to the relevant + * PHY device. + * + * The EEPROM contains initial register values which the chip will read over I2C + * upon hardware reset. It is also possible to omit the EEPROM. In both cases, + * the driver will manually reprogram some registers using jam tables to reach + * an initial state defined by the vendor driver. + * + * This Linux driver is written based on an OS-agnostic vendor driver from + * Realtek. The reference GPL-licensed sources can be found in the OpenWrt + * source tree under the name rtl8367c. The vendor driver claims to support a + * number of similar switch controllers from Realtek, but the only hardware we + * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under + * the name RTL8367C. 
Although one wishes that the 'C' stood for some kind of + * common hardware revision, there exist examples of chips with the suffix -VC + * which are explicitly not supported by the rtl8367c driver and which instead + * require the rtl8367d vendor driver. With all this uncertainty, the driver has + * been modestly named rtl8365mb. Future implementors may wish to rename things + * accordingly. + * + * In the same family of chips, some carry up to 8 user ports and up to 2 + * extension ports. Where possible this driver tries to make things generic, but + * more work must be done to support these configurations. According to + * documentation from Realtek, the family should include the following chips: + * + *  - RTL8363NB + *  - RTL8363NB-VB + *  - RTL8363SC + *  - RTL8363SC-VB + *  - RTL8364NB + *  - RTL8364NB-VB + *  - RTL8365MB-VC + *  - RTL8366SC + *  - RTL8367RB-VB + *  - RTL8367SB + *  - RTL8367S + *  - RTL8370MB + *  - RTL8310SR + * + * Some of the register logic for these additional chips has been skipped over + * while implementing this driver. It is therefore not possible to assume that + * things will work out-of-the-box for other chips, and a careful review of the + * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be + * one of the simpler chips. + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/mutex.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> +#include <linux/if_bridge.h> + +#include "realtek-smi-core.h" + +/* Chip-specific data and limits */ +#define RTL8365MB_CHIP_ID_8365MB_VC		0x6367 +#define RTL8365MB_CPU_PORT_NUM_8365MB_VC	6 +#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC	2112 + +/* Family-specific data and limits */ +#define RTL8365MB_NUM_PHYREGS	32 +#define RTL8365MB_PHYREGMAX	(RTL8365MB_NUM_PHYREGS - 1) +#define RTL8365MB_MAX_NUM_PORTS	(RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1) + +/* Chip identification registers */ +#define RTL8365MB_CHIP_ID_REG		0x1300 + +#define RTL8365MB_CHIP_VER_REG		0x1301 + +#define RTL8365MB_MAGIC_REG		0x13C2 +#define   RTL8365MB_MAGIC_VALUE		0x0249 + +/* Chip reset register */ +#define RTL8365MB_CHIP_RESET_REG	0x1322 +#define RTL8365MB_CHIP_RESET_SW_MASK	0x0002 +#define RTL8365MB_CHIP_RESET_HW_MASK	0x0001 + +/* Interrupt polarity register */ +#define RTL8365MB_INTR_POLARITY_REG	0x1100 +#define   RTL8365MB_INTR_POLARITY_MASK	0x0001 +#define   RTL8365MB_INTR_POLARITY_HIGH	0 +#define   RTL8365MB_INTR_POLARITY_LOW	1 + +/* Interrupt control/status register - enable/check specific interrupt types */ +#define RTL8365MB_INTR_CTRL_REG			0x1101 +#define RTL8365MB_INTR_STATUS_REG		0x1102 +#define   RTL8365MB_INTR_SLIENT_START_2_MASK	0x1000 +#define   RTL8365MB_INTR_SLIENT_START_MASK	0x0800 +#define   RTL8365MB_INTR_ACL_ACTION_MASK	0x0200 +#define   RTL8365MB_INTR_CABLE_DIAG_FIN_MASK	0x0100 +#define   RTL8365MB_INTR_INTERRUPT_8051_MASK	0x0080 +#define   RTL8365MB_INTR_LOOP_DETECTION_MASK	0x0040 +#define   RTL8365MB_INTR_GREEN_TIMER_MASK	0x0020 +#define   RTL8365MB_INTR_SPECIAL_CONGEST_MASK	0x0010 +#define   RTL8365MB_INTR_SPEED_CHANGE_MASK	0x0008 +#define   RTL8365MB_INTR_LEARN_OVER_MASK	0x0004 +#define   RTL8365MB_INTR_METER_EXCEEDED_MASK	0x0002 +#define   RTL8365MB_INTR_LINK_CHANGE_MASK	0x0001 +#define   RTL8365MB_INTR_ALL_MASK                      \ +		(RTL8365MB_INTR_SLIENT_START_2_MASK |  \ +		 RTL8365MB_INTR_SLIENT_START_MASK |    \ +		 RTL8365MB_INTR_ACL_ACTION_MASK |      \ +		 RTL8365MB_INTR_CABLE_DIAG_FIN_MASK |  \ +		 
RTL8365MB_INTR_INTERRUPT_8051_MASK |  \ +		 RTL8365MB_INTR_LOOP_DETECTION_MASK |  \ +		 RTL8365MB_INTR_GREEN_TIMER_MASK |     \ +		 RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \ +		 RTL8365MB_INTR_SPEED_CHANGE_MASK |    \ +		 RTL8365MB_INTR_LEARN_OVER_MASK |      \ +		 RTL8365MB_INTR_METER_EXCEEDED_MASK |  \ +		 RTL8365MB_INTR_LINK_CHANGE_MASK) + +/* Per-port interrupt type status registers */ +#define RTL8365MB_PORT_LINKDOWN_IND_REG		0x1106 +#define   RTL8365MB_PORT_LINKDOWN_IND_MASK	0x07FF + +#define RTL8365MB_PORT_LINKUP_IND_REG		0x1107 +#define   RTL8365MB_PORT_LINKUP_IND_MASK	0x07FF + +/* PHY indirect access registers */ +#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG			0x1F00 +#define   RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK		0x0002 +#define   RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ		0 +#define   RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE		1 +#define   RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK		0x0001 +#define   RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE		1 +#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG			0x1F01 +#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG			0x1F02 +#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK	GENMASK(4, 0) +#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK		GENMASK(6, 5) +#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK	GENMASK(11, 8) +#define   RTL8365MB_PHY_BASE					0x2000 +#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG		0x1F03 +#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG			0x1F04 + +/* PHY OCP address prefix register */ +#define RTL8365MB_GPHY_OCP_MSB_0_REG			0x1D15 +#define   RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK	0x0FC0 +#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK		0xFC00 + +/* The PHY OCP addresses of PHY registers 0~31 start here */ +#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE		0xA400 + +/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */ +#define RTL8365MB_EXT_PORT_MODE_DISABLE		0 +#define RTL8365MB_EXT_PORT_MODE_RGMII		1 +#define RTL8365MB_EXT_PORT_MODE_MII_MAC		2 +#define RTL8365MB_EXT_PORT_MODE_MII_PHY		3 +#define RTL8365MB_EXT_PORT_MODE_TMII_MAC	4 +#define RTL8365MB_EXT_PORT_MODE_TMII_PHY	5 +#define RTL8365MB_EXT_PORT_MODE_GMII		6 +#define RTL8365MB_EXT_PORT_MODE_RMII_MAC	7 +#define RTL8365MB_EXT_PORT_MODE_RMII_PHY	8 +#define RTL8365MB_EXT_PORT_MODE_SGMII		9 +#define RTL8365MB_EXT_PORT_MODE_HSGMII		10 +#define RTL8365MB_EXT_PORT_MODE_1000X_100FX	11 +#define RTL8365MB_EXT_PORT_MODE_1000X		12 +#define RTL8365MB_EXT_PORT_MODE_100FX		13 + +/* EXT port interface mode configuration registers 0~1 */ +#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0		0x1305 +#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1		0x13C3 +#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport)   \ +		(RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \ +		 ((_extport) >> 1) * (0x13C3 - 0x1305)) +#define   RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \ +		(0xF << (((_extport) % 2))) +#define   RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \ +		(((_extport) % 2) * 4) + +/* EXT port RGMII TX/RX delay configuration registers 1~2 */ +#define RTL8365MB_EXT_RGMXF_REG1		0x1307 +#define RTL8365MB_EXT_RGMXF_REG2		0x13C5 +#define RTL8365MB_EXT_RGMXF_REG(_extport)   \ +		(RTL8365MB_EXT_RGMXF_REG1 + \ +		 (((_extport) >> 1) * (0x13C5 - 0x1307))) +#define   RTL8365MB_EXT_RGMXF_RXDELAY_MASK	0x0007 +#define   RTL8365MB_EXT_RGMXF_TXDELAY_MASK	0x0008 + +/* External port speed values - used in DIGITAL_INTERFACE_FORCE */ +#define RTL8365MB_PORT_SPEED_10M	0 +#define RTL8365MB_PORT_SPEED_100M	1 +#define RTL8365MB_PORT_SPEED_1000M	2 + +/* EXT port 
force configuration registers 0~2 */ +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0			0x1310 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1			0x1311 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2			0x13C4 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport)   \ +		(RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \ +		 ((_extport) & 0x1) +                     \ +		 ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310))) +#define   RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK		0x1000 +#define   RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK		0x0080 +#define   RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK	0x0040 +#define   RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK	0x0020 +#define   RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK		0x0010 +#define   RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK		0x0004 +#define   RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK		0x0003 + +/* CPU port mask register - controls which ports are treated as CPU ports */ +#define RTL8365MB_CPU_PORT_MASK_REG	0x1219 +#define   RTL8365MB_CPU_PORT_MASK_MASK	0x07FF + +/* CPU control register */ +#define RTL8365MB_CPU_CTRL_REG			0x121A +#define   RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK	0x0400 +#define   RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK	0x0200 +#define   RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK	0x0080 +#define   RTL8365MB_CPU_CTRL_TAG_POSITION_MASK	0x0040 +#define   RTL8365MB_CPU_CTRL_TRAP_PORT_MASK	0x0038 +#define   RTL8365MB_CPU_CTRL_INSERTMODE_MASK	0x0006 +#define   RTL8365MB_CPU_CTRL_EN_MASK		0x0001 + +/* Maximum packet length register */ +#define RTL8365MB_CFG0_MAX_LEN_REG	0x088C +#define   RTL8365MB_CFG0_MAX_LEN_MASK	0x3FFF + +/* Port learning limit registers */ +#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE		0x0A20 +#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \ +		(RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport)) + +/* Port isolation (forwarding mask) registers */ +#define RTL8365MB_PORT_ISOLATION_REG_BASE		0x08A2 +#define RTL8365MB_PORT_ISOLATION_REG(_physport) \ +		(RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport)) +#define   RTL8365MB_PORT_ISOLATION_MASK			0x07FF + +/* MSTP port state registers - indexed by MSTI (tree instance) */ +#define RTL8365MB_MSTI_CTRL_BASE			0x0A00 +#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \ +		(RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3)) +#define   RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1) +#define   RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \ +		(0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport))) + +/* MIB counter value registers */ +#define RTL8365MB_MIB_COUNTER_BASE	0x1000 +#define RTL8365MB_MIB_COUNTER_REG(_x)	(RTL8365MB_MIB_COUNTER_BASE + (_x)) + +/* MIB counter address register */ +#define RTL8365MB_MIB_ADDRESS_REG		0x1004 +#define   RTL8365MB_MIB_ADDRESS_PORT_OFFSET	0x007C +#define   RTL8365MB_MIB_ADDRESS(_p, _x) \ +		(((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2) + +#define RTL8365MB_MIB_CTRL0_REG			0x1005 +#define   RTL8365MB_MIB_CTRL0_RESET_MASK	0x0002 +#define   RTL8365MB_MIB_CTRL0_BUSY_MASK		0x0001 + +/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed + * to block. On the other hand, accessing MIB counters absolutely requires us to + * block. The solution is thus to schedule work which polls the MIB counters + * asynchronously and updates some private data, which the callback can then + * fetch atomically. Three seconds should be a good enough polling interval. 
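+ * Each port gets its own delayed work item for this purpose, together with a + * spinlock guarding the cached statistics; see struct rtl8365mb_port below.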
+ */ +#define RTL8365MB_STATS_INTERVAL_JIFFIES	(3 * HZ) + +enum rtl8365mb_mib_counter_index { +	RTL8365MB_MIB_ifInOctets, +	RTL8365MB_MIB_dot3StatsFCSErrors, +	RTL8365MB_MIB_dot3StatsSymbolErrors, +	RTL8365MB_MIB_dot3InPauseFrames, +	RTL8365MB_MIB_dot3ControlInUnknownOpcodes, +	RTL8365MB_MIB_etherStatsFragments, +	RTL8365MB_MIB_etherStatsJabbers, +	RTL8365MB_MIB_ifInUcastPkts, +	RTL8365MB_MIB_etherStatsDropEvents, +	RTL8365MB_MIB_ifInMulticastPkts, +	RTL8365MB_MIB_ifInBroadcastPkts, +	RTL8365MB_MIB_inMldChecksumError, +	RTL8365MB_MIB_inIgmpChecksumError, +	RTL8365MB_MIB_inMldSpecificQuery, +	RTL8365MB_MIB_inMldGeneralQuery, +	RTL8365MB_MIB_inIgmpSpecificQuery, +	RTL8365MB_MIB_inIgmpGeneralQuery, +	RTL8365MB_MIB_inMldLeaves, +	RTL8365MB_MIB_inIgmpLeaves, +	RTL8365MB_MIB_etherStatsOctets, +	RTL8365MB_MIB_etherStatsUnderSizePkts, +	RTL8365MB_MIB_etherOversizeStats, +	RTL8365MB_MIB_etherStatsPkts64Octets, +	RTL8365MB_MIB_etherStatsPkts65to127Octets, +	RTL8365MB_MIB_etherStatsPkts128to255Octets, +	RTL8365MB_MIB_etherStatsPkts256to511Octets, +	RTL8365MB_MIB_etherStatsPkts512to1023Octets, +	RTL8365MB_MIB_etherStatsPkts1024to1518Octets, +	RTL8365MB_MIB_ifOutOctets, +	RTL8365MB_MIB_dot3StatsSingleCollisionFrames, +	RTL8365MB_MIB_dot3StatsMultipleCollisionFrames, +	RTL8365MB_MIB_dot3StatsDeferredTransmissions, +	RTL8365MB_MIB_dot3StatsLateCollisions, +	RTL8365MB_MIB_etherStatsCollisions, +	RTL8365MB_MIB_dot3StatsExcessiveCollisions, +	RTL8365MB_MIB_dot3OutPauseFrames, +	RTL8365MB_MIB_ifOutDiscards, +	RTL8365MB_MIB_dot1dTpPortInDiscards, +	RTL8365MB_MIB_ifOutUcastPkts, +	RTL8365MB_MIB_ifOutMulticastPkts, +	RTL8365MB_MIB_ifOutBroadcastPkts, +	RTL8365MB_MIB_outOampduPkts, +	RTL8365MB_MIB_inOampduPkts, +	RTL8365MB_MIB_inIgmpJoinsSuccess, +	RTL8365MB_MIB_inIgmpJoinsFail, +	RTL8365MB_MIB_inMldJoinsSuccess, +	RTL8365MB_MIB_inMldJoinsFail, +	RTL8365MB_MIB_inReportSuppressionDrop, +	RTL8365MB_MIB_inLeaveSuppressionDrop, +	RTL8365MB_MIB_outIgmpReports, +	RTL8365MB_MIB_outIgmpLeaves, +	RTL8365MB_MIB_outIgmpGeneralQuery, +	RTL8365MB_MIB_outIgmpSpecificQuery, +	RTL8365MB_MIB_outMldReports, +	RTL8365MB_MIB_outMldLeaves, +	RTL8365MB_MIB_outMldGeneralQuery, +	RTL8365MB_MIB_outMldSpecificQuery, +	RTL8365MB_MIB_inKnownMulticastPkts, +	RTL8365MB_MIB_END, +}; + +struct rtl8365mb_mib_counter { +	u32 offset; +	u32 length; +	const char *name; +}; + +#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \ +		[RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name } + +static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = { +	RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets), +	RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors), +	RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors), +	RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames), +	RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes), +	RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments), +	RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers), +	RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts), +	RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents), +	RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts), +	RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts), +	RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError), +	RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError), +	RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery), +	RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery), +	RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery), +	RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery), +	RTL8365MB_MAKE_MIB_COUNTER(36, 2, 
inMldLeaves), +	RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves), +	RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets), +	RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts), +	RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats), +	RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets), +	RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets), +	RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets), +	RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets), +	RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets), +	RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets), +	RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets), +	RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames), +	RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames), +	RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions), +	RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions), +	RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions), +	RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions), +	RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames), +	RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards), +	RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards), +	RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts), +	RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts), +	RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts), +	RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts), +	RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts), +	RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess), +	RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail), +	RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess), +	RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail), +	RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop), +	RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop), +	RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports), +	RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves), +	RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery), +	RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery), +	RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports), +	RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves), +	RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery), +	RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery), +	RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts), +}; + +static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END); + +struct rtl8365mb_jam_tbl_entry { +	u16 reg; +	u16 val; +}; + +/* Lifted from the vendor driver sources */ +static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = { +	{ 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 }, +	{ 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA }, +	{ 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 }, +	{ 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F }, +	{ 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 }, +	{ 0x13F0, 0x0000 }, +}; + +static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = { +	{ 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 }, +	{ 0x03Fa, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E }, +	{ 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 }, +	{ 0x1D32, 0x0002 }, +}; + +enum rtl8365mb_stp_state { +	RTL8365MB_STP_STATE_DISABLED = 0, +	RTL8365MB_STP_STATE_BLOCKING = 1, +	RTL8365MB_STP_STATE_LEARNING = 2, +	RTL8365MB_STP_STATE_FORWARDING = 3, +}; + +enum rtl8365mb_cpu_insert { +	RTL8365MB_CPU_INSERT_TO_ALL = 0, +	RTL8365MB_CPU_INSERT_TO_TRAPPING = 
1, +	RTL8365MB_CPU_INSERT_TO_NONE = 2, +}; + +enum rtl8365mb_cpu_position { +	RTL8365MB_CPU_POS_AFTER_SA = 0, +	RTL8365MB_CPU_POS_BEFORE_CRC = 1, +}; + +enum rtl8365mb_cpu_format { +	RTL8365MB_CPU_FORMAT_8BYTES = 0, +	RTL8365MB_CPU_FORMAT_4BYTES = 1, +}; + +enum rtl8365mb_cpu_rxlen { +	RTL8365MB_CPU_RXLEN_72BYTES = 0, +	RTL8365MB_CPU_RXLEN_64BYTES = 1, +}; + +/** + * struct rtl8365mb_cpu - CPU port configuration + * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames + * @mask: port mask of ports that should parse CPU tags + * @trap_port: forward trapped frames to this port + * @insert: CPU tag insertion mode in switch->CPU frames + * @position: position of CPU tag in frame + * @rx_length: minimum CPU RX length + * @format: CPU tag format + * + * Represents the CPU tagging and CPU port configuration of the switch. These + * settings are configurable at runtime. + */ +struct rtl8365mb_cpu { +	bool enable; +	u32 mask; +	u32 trap_port; +	enum rtl8365mb_cpu_insert insert; +	enum rtl8365mb_cpu_position position; +	enum rtl8365mb_cpu_rxlen rx_length; +	enum rtl8365mb_cpu_format format; +}; + +/** + * struct rtl8365mb_port - private per-port data + * @smi: pointer to parent realtek_smi data + * @index: DSA port index, same as dsa_port::index + * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic + *         access via rtl8365mb_get_stats64 + * @stats_lock: protect the stats structure during read/update + * @mib_work: delayed work for polling MIB counters + */ +struct rtl8365mb_port { +	struct realtek_smi *smi; +	unsigned int index; +	struct rtnl_link_stats64 stats; +	spinlock_t stats_lock; +	struct delayed_work mib_work; +}; + +/** + * struct rtl8365mb - private chip-specific driver data + * @smi: pointer to parent realtek_smi data + * @irq: registered IRQ or zero + * @chip_id: chip identifier + * @chip_ver: chip silicon revision + * @port_mask: mask of all ports + * @learn_limit_max: maximum number of L2 addresses the chip can learn + * @cpu: CPU tagging and CPU port configuration for this chip + * @mib_lock: prevent concurrent reads of MIB counters + * @ports: per-port data + * @jam_table: chip-specific initialization jam table + * @jam_size: size of the chip's jam table + * + * Private data for this driver. 
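+ * A single instance is allocated as smi->chip_data by the realtek-smi core, + * sized according to rtl8365mb_variant.chip_data_sz at the bottom of this file.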
+ */ +struct rtl8365mb { +	struct realtek_smi *smi; +	int irq; +	u32 chip_id; +	u32 chip_ver; +	u32 port_mask; +	u32 learn_limit_max; +	struct rtl8365mb_cpu cpu; +	struct mutex mib_lock; +	struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS]; +	const struct rtl8365mb_jam_tbl_entry *jam_table; +	size_t jam_size; +}; + +static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi) +{ +	u32 val; + +	return regmap_read_poll_timeout(smi->map, +					RTL8365MB_INDIRECT_ACCESS_STATUS_REG, +					val, !val, 10, 100); +} + +static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy, +				     u32 ocp_addr) +{ +	u32 val; +	int ret; + +	/* Set OCP prefix */ +	val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr); +	ret = regmap_update_bits( +		smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG, +		RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, +		FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val)); +	if (ret) +		return ret; + +	/* Set PHY register address */ +	val = RTL8365MB_PHY_BASE; +	val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy); +	val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK, +			  ocp_addr >> 1); +	val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK, +			  ocp_addr >> 6); +	ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, +			   val); +	if (ret) +		return ret; + +	return 0; +} + +static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy, +				  u32 ocp_addr, u16 *data) +{ +	u32 val; +	int ret; + +	ret = rtl8365mb_phy_poll_busy(smi); +	if (ret) +		return ret; + +	ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr); +	if (ret) +		return ret; + +	/* Execute read operation */ +	val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK, +			 RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) | +	      FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK, +			 RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ); +	ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val); +	if (ret) +		return ret; + +	ret = rtl8365mb_phy_poll_busy(smi); +	if (ret) +		return ret; + +	/* Get PHY register data */ +	ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, +			  &val); +	if (ret) +		return ret; + +	*data = val & 0xFFFF; + +	return 0; +} + +static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy, +				   u32 ocp_addr, u16 data) +{ +	u32 val; +	int ret; + +	ret = rtl8365mb_phy_poll_busy(smi); +	if (ret) +		return ret; + +	ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr); +	if (ret) +		return ret; + +	/* Set PHY register data */ +	ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, +			   data); +	if (ret) +		return ret; + +	/* Execute write operation */ +	val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK, +			 RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) | +	      FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK, +			 RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE); +	ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val); +	if (ret) +		return ret; + +	ret = rtl8365mb_phy_poll_busy(smi); +	if (ret) +		return ret; + +	return 0; +} + +static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum) +{ +	u32 ocp_addr; +	u16 val; +	int ret; + +	if (regnum > RTL8365MB_PHYREGMAX) +		return -EINVAL; + +	ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2; + +	ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val); +	if (ret) { +		dev_err(smi->dev, +			"failed to read PHY%d reg %02x @ %04x, ret %d\n", phy, +			regnum, ocp_addr, ret); +		return ret; +	
} + +	dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n", +		phy, regnum, ocp_addr, val); + +	return val; +} + +static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum, +			       u16 val) +{ +	u32 ocp_addr; +	int ret; + +	if (regnum > RTL8365MB_PHYREGMAX) +		return -EINVAL; + +	ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2; + +	ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val); +	if (ret) { +		dev_err(smi->dev, +			"failed to write PHY%d reg %02x @ %04x, ret %d\n", phy, +			regnum, ocp_addr, ret); +		return ret; +	} + +	dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n", +		phy, regnum, ocp_addr, val); + +	return 0; +} + +static enum dsa_tag_protocol +rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port, +			   enum dsa_tag_protocol mp) +{ +	return DSA_TAG_PROTO_RTL8_4; +} + +static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port, +				      phy_interface_t interface) +{ +	struct device_node *dn; +	struct dsa_port *dp; +	int tx_delay = 0; +	int rx_delay = 0; +	int ext_port; +	u32 val; +	int ret; + +	if (port == smi->cpu_port) { +		ext_port = 1; +	} else { +		dev_err(smi->dev, "only one EXT port is currently supported\n"); +		return -EINVAL; +	} + +	dp = dsa_to_port(smi->ds, port); +	dn = dp->dn; + +	/* Set the RGMII TX/RX delay +	 * +	 * The Realtek vendor driver indicates the following possible +	 * configuration settings: +	 * +	 *   TX delay: +	 *     0 = no delay, 1 = 2 ns delay +	 *   RX delay: +	 *     0 = no delay, 7 = maximum delay +	 *     No units are specified, but there are a total of 8 steps. +	 * +	 * The vendor driver also states that this must be configured *before* +	 * forcing the external interface into a particular mode, which is done +	 * in the rtl8365mb_phylink_mac_link_{up,down} functions. +	 * +	 * Only configure an RGMII TX (resp. RX) delay if the +	 * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is +	 * specified. We ignore the detail of the RGMII interface mode +	 * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only +	 * property. +	 * +	 * For the RX delay, we assume that a register value of 4 corresponds to +	 * 2 ns. But this is just an educated guess, so ignore all other values +	 * to avoid too much confusion. 
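+	 * +	 * For example, a tx-internal-delay-ps of 2000 (2 ns) is programmed below as +	 * a TX delay field value of 1, and an rx-internal-delay-ps of 2000 as an RX +	 * delay field value of 4. Any other value leaves the respective delay at 0 +	 * and triggers a warning.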
+	 */ +	if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) { +		val = val / 1000; /* convert to ns */ + +		if (val == 0 || val == 2) +			tx_delay = val / 2; +		else +			dev_warn(smi->dev, +				 "EXT port TX delay must be 0 or 2 ns\n"); +	} + +	if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) { +		val = val / 1000; /* convert to ns */ + +		if (val == 0 || val == 2) +			rx_delay = val * 2; +		else +			dev_warn(smi->dev, +				 "EXT port RX delay must be 0 to 2 ns\n"); +	} + +	ret = regmap_update_bits( +		smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port), +		RTL8365MB_EXT_RGMXF_TXDELAY_MASK | +			RTL8365MB_EXT_RGMXF_RXDELAY_MASK, +		FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) | +			FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay)); +	if (ret) +		return ret; + +	ret = regmap_update_bits( +		smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port), +		RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port), +		RTL8365MB_EXT_PORT_MODE_RGMII +			<< RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET( +				   ext_port)); +	if (ret) +		return ret; + +	return 0; +} + +static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port, +					  bool link, int speed, int duplex, +					  bool tx_pause, bool rx_pause) +{ +	u32 r_tx_pause; +	u32 r_rx_pause; +	u32 r_duplex; +	u32 r_speed; +	u32 r_link; +	int ext_port; +	int val; +	int ret; + +	if (port == smi->cpu_port) { +		ext_port = 1; +	} else { +		dev_err(smi->dev, "only one EXT port is currently supported\n"); +		return -EINVAL; +	} + +	if (link) { +		/* Force the link up with the desired configuration */ +		r_link = 1; +		r_rx_pause = rx_pause ? 1 : 0; +		r_tx_pause = tx_pause ? 1 : 0; + +		if (speed == SPEED_1000) { +			r_speed = RTL8365MB_PORT_SPEED_1000M; +		} else if (speed == SPEED_100) { +			r_speed = RTL8365MB_PORT_SPEED_100M; +		} else if (speed == SPEED_10) { +			r_speed = RTL8365MB_PORT_SPEED_10M; +		} else { +			dev_err(smi->dev, "unsupported port speed %s\n", +				phy_speed_to_str(speed)); +			return -EINVAL; +		} + +		if (duplex == DUPLEX_FULL) { +			r_duplex = 1; +		} else if (duplex == DUPLEX_HALF) { +			r_duplex = 0; +		} else { +			dev_err(smi->dev, "unsupported duplex %s\n", +				phy_duplex_to_str(duplex)); +			return -EINVAL; +		} +	} else { +		/* Force the link down and reset any programmed configuration */ +		r_link = 0; +		r_tx_pause = 0; +		r_rx_pause = 0; +		r_speed = 0; +		r_duplex = 0; +	} + +	val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) | +	      FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK, +			 r_tx_pause) | +	      FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK, +			 r_rx_pause) | +	      FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) | +	      FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK, +			 r_duplex) | +	      FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed); +	ret = regmap_write(smi->map, +			   RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port), +			   val); +	if (ret) +		return ret; + +	return 0; +} + +static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port, +					 phy_interface_t interface) +{ +	if (dsa_is_user_port(ds, port) && +	    (interface == PHY_INTERFACE_MODE_NA || +	     interface == PHY_INTERFACE_MODE_INTERNAL)) +		/* Internal PHY */ +		return true; +	else if (dsa_is_cpu_port(ds, port) && +		 phy_interface_mode_is_rgmii(interface)) +		/* Extension MAC */ +		return true; + +	return false; +} + +static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port, +	
			       unsigned long *supported, +				       struct phylink_link_state *state) +{ +	struct realtek_smi *smi = ds->priv; +	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 }; + +	/* include/linux/phylink.h says: +	 *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink +	 *     expects the MAC driver to return all supported link modes. +	 */ +	if (state->interface != PHY_INTERFACE_MODE_NA && +	    !rtl8365mb_phy_mode_supported(ds, port, state->interface)) { +		dev_err(smi->dev, "phy mode %s is unsupported on port %d\n", +			phy_modes(state->interface), port); +		linkmode_zero(supported); +		return; +	} + +	phylink_set_port_modes(mask); + +	phylink_set(mask, Autoneg); +	phylink_set(mask, Pause); +	phylink_set(mask, Asym_Pause); + +	phylink_set(mask, 10baseT_Half); +	phylink_set(mask, 10baseT_Full); +	phylink_set(mask, 100baseT_Half); +	phylink_set(mask, 100baseT_Full); +	phylink_set(mask, 1000baseT_Full); + +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask); +} + +static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port, +					 unsigned int mode, +					 const struct phylink_link_state *state) +{ +	struct realtek_smi *smi = ds->priv; +	int ret; + +	if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) { +		dev_err(smi->dev, "phy mode %s is unsupported on port %d\n", +			phy_modes(state->interface), port); +		return; +	} + +	if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) { +		dev_err(smi->dev, +			"port %d supports only conventional PHY or fixed-link\n", +			port); +		return; +	} + +	if (phy_interface_mode_is_rgmii(state->interface)) { +		ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface); +		if (ret) +			dev_err(smi->dev, +				"failed to configure RGMII mode on port %d: %d\n", +				port, ret); +		return; +	} + +	/* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also +	 * supports +	 */ +} + +static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port, +					    unsigned int mode, +					    phy_interface_t interface) +{ +	struct realtek_smi *smi = ds->priv; +	struct rtl8365mb_port *p; +	struct rtl8365mb *mb; +	int ret; + +	mb = smi->chip_data; +	p = &mb->ports[port]; +	cancel_delayed_work_sync(&p->mib_work); + +	if (phy_interface_mode_is_rgmii(interface)) { +		ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0, +						     false, false); +		if (ret) +			dev_err(smi->dev, +				"failed to reset forced mode on port %d: %d\n", +				port, ret); + +		return; +	} +} + +static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port, +					  unsigned int mode, +					  phy_interface_t interface, +					  struct phy_device *phydev, int speed, +					  int duplex, bool tx_pause, +					  bool rx_pause) +{ +	struct realtek_smi *smi = ds->priv; +	struct rtl8365mb_port *p; +	struct rtl8365mb *mb; +	int ret; + +	mb = smi->chip_data; +	p = &mb->ports[port]; +	schedule_delayed_work(&p->mib_work, 0); + +	if (phy_interface_mode_is_rgmii(interface)) { +		ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed, +						     duplex, tx_pause, +						     rx_pause); +		if (ret) +			dev_err(smi->dev, +				"failed to force mode on port %d: %d\n", port, +				ret); + +		return; +	} +} + +static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port, +					 u8 state) +{ +	struct realtek_smi *smi = ds->priv; +	enum rtl8365mb_stp_state val; +	int msti = 0; + +	switch (state) { +	case BR_STATE_DISABLED: +		val = RTL8365MB_STP_STATE_DISABLED; +		break; +	case 
BR_STATE_BLOCKING: +	case BR_STATE_LISTENING: +		val = RTL8365MB_STP_STATE_BLOCKING; +		break; +	case BR_STATE_LEARNING: +		val = RTL8365MB_STP_STATE_LEARNING; +		break; +	case BR_STATE_FORWARDING: +		val = RTL8365MB_STP_STATE_FORWARDING; +		break; +	default: +		dev_err(smi->dev, "invalid STP state: %u\n", state); +		return; +	} + +	regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port), +			   RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port), +			   val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port)); +} + +static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port, +				       bool enable) +{ +	struct rtl8365mb *mb = smi->chip_data; + +	/* Enable/disable learning by limiting the number of L2 addresses the +	 * port can learn. Realtek documentation states that a limit of zero +	 * disables learning. When enabling learning, set it to the chip's +	 * maximum. +	 */ +	return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port), +			    enable ? mb->learn_limit_max : 0); +} + +static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port, +					u32 mask) +{ +	return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask); +} + +static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port, +				      u32 offset, u32 length, u64 *mibvalue) +{ +	u64 tmpvalue = 0; +	u32 val; +	int ret; +	int i; + +	/* The MIB address is an SRAM address. We request a particular address +	 * and then poll the control register before reading the value from some +	 * counter registers. +	 */ +	ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG, +			   RTL8365MB_MIB_ADDRESS(port, offset)); +	if (ret) +		return ret; + +	/* Poll for completion */ +	ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val, +				       !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK), +				       10, 100); +	if (ret) +		return ret; + +	/* Presumably this indicates a MIB counter read failure */ +	if (val & RTL8365MB_MIB_CTRL0_RESET_MASK) +		return -EIO; + +	/* There are four MIB counter registers each holding a 16 bit word of a +	 * MIB counter. Depending on the offset, we should read from the upper +	 * two or lower two registers. In case the MIB counter is 4 words, we +	 * read from all four registers. 
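+	 * +	 * For example, ifInOctets (offset 0, 4 words) is read from counter +	 * registers 3 down to 0, while dot3StatsFCSErrors (offset 4, 2 words) ends +	 * up at (4 + 1) % 4 = 1 and is read from registers 1 and 0, high word +	 * first.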
+	 */ +	if (length == 4) +		offset = 3; +	else +		offset = (offset + 1) % 4; + +	/* Read the MIB counter 16 bits at a time */ +	for (i = 0; i < length; i++) { +		ret = regmap_read(smi->map, +				  RTL8365MB_MIB_COUNTER_REG(offset - i), &val); +		if (ret) +			return ret; + +		tmpvalue = ((tmpvalue) << 16) | (val & 0xFFFF); +	} + +	/* Only commit the result if no error occurred */ +	*mibvalue = tmpvalue; + +	return 0; +} + +static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data) +{ +	struct realtek_smi *smi = ds->priv; +	struct rtl8365mb *mb; +	int ret; +	int i; + +	mb = smi->chip_data; + +	mutex_lock(&mb->mib_lock); +	for (i = 0; i < RTL8365MB_MIB_END; i++) { +		struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i]; + +		ret = rtl8365mb_mib_counter_read(smi, port, mib->offset, +						 mib->length, &data[i]); +		if (ret) { +			dev_err(smi->dev, +				"failed to read port %d counters: %d\n", port, +				ret); +			break; +		} +	} +	mutex_unlock(&mb->mib_lock); +} + +static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data) +{ +	int i; + +	if (stringset != ETH_SS_STATS) +		return; + +	for (i = 0; i < RTL8365MB_MIB_END; i++) { +		struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i]; + +		strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN); +	} +} + +static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ +	if (sset != ETH_SS_STATS) +		return -EOPNOTSUPP; + +	return RTL8365MB_MIB_END; +} + +static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port, +				    struct ethtool_eth_phy_stats *phy_stats) +{ +	struct realtek_smi *smi = ds->priv; +	struct rtl8365mb_mib_counter *mib; +	struct rtl8365mb *mb; + +	mb = smi->chip_data; +	mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors]; + +	mutex_lock(&mb->mib_lock); +	rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length, +				   &phy_stats->SymbolErrorDuringCarrier); +	mutex_unlock(&mb->mib_lock); +} + +static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port, +				    struct ethtool_eth_mac_stats *mac_stats) +{ +	u64 cnt[RTL8365MB_MIB_END] = { +		[RTL8365MB_MIB_ifOutOctets] = 1, +		[RTL8365MB_MIB_ifOutUcastPkts] = 1, +		[RTL8365MB_MIB_ifOutMulticastPkts] = 1, +		[RTL8365MB_MIB_ifOutBroadcastPkts] = 1, +		[RTL8365MB_MIB_dot3OutPauseFrames] = 1, +		[RTL8365MB_MIB_ifOutDiscards] = 1, +		[RTL8365MB_MIB_ifInOctets] = 1, +		[RTL8365MB_MIB_ifInUcastPkts] = 1, +		[RTL8365MB_MIB_ifInMulticastPkts] = 1, +		[RTL8365MB_MIB_ifInBroadcastPkts] = 1, +		[RTL8365MB_MIB_dot3InPauseFrames] = 1, +		[RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1, +		[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1, +		[RTL8365MB_MIB_dot3StatsFCSErrors] = 1, +		[RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1, +		[RTL8365MB_MIB_dot3StatsLateCollisions] = 1, +		[RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1, + +	}; +	struct realtek_smi *smi = ds->priv; +	struct rtl8365mb *mb; +	int ret; +	int i; + +	mb = smi->chip_data; + +	mutex_lock(&mb->mib_lock); +	for (i = 0; i < RTL8365MB_MIB_END; i++) { +		struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i]; + +		/* Only fetch required MIB counters (marked = 1 above) */ +		if (!cnt[i]) +			continue; + +		ret = rtl8365mb_mib_counter_read(smi, port, mib->offset, +						 mib->length, &cnt[i]); +		if (ret) +			break; +	} +	mutex_unlock(&mb->mib_lock); + +	/* The RTL8365MB-VC exposes MIB objects, which we have to translate into +	 * IEEE 802.3 Managed Objects. 
This is not always completely faithful, +	 * but we try out best. See RFC 3635 for a detailed treatment of the +	 * subject. +	 */ + +	mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] + +					 cnt[RTL8365MB_MIB_ifOutMulticastPkts] + +					 cnt[RTL8365MB_MIB_ifOutBroadcastPkts] + +					 cnt[RTL8365MB_MIB_dot3OutPauseFrames] - +					 cnt[RTL8365MB_MIB_ifOutDiscards]; +	mac_stats->SingleCollisionFrames = +		cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames]; +	mac_stats->MultipleCollisionFrames = +		cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames]; +	mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] + +				      cnt[RTL8365MB_MIB_ifInMulticastPkts] + +				      cnt[RTL8365MB_MIB_ifInBroadcastPkts] + +				      cnt[RTL8365MB_MIB_dot3InPauseFrames]; +	mac_stats->FrameCheckSequenceErrors = +		cnt[RTL8365MB_MIB_dot3StatsFCSErrors]; +	mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] - +					 18 * mac_stats->FramesTransmittedOK; +	mac_stats->FramesWithDeferredXmissions = +		cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions]; +	mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions]; +	mac_stats->FramesAbortedDueToXSColls = +		cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions]; +	mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] - +				      18 * mac_stats->FramesReceivedOK; +	mac_stats->MulticastFramesXmittedOK = +		cnt[RTL8365MB_MIB_ifOutMulticastPkts]; +	mac_stats->BroadcastFramesXmittedOK = +		cnt[RTL8365MB_MIB_ifOutBroadcastPkts]; +	mac_stats->MulticastFramesReceivedOK = +		cnt[RTL8365MB_MIB_ifInMulticastPkts]; +	mac_stats->BroadcastFramesReceivedOK = +		cnt[RTL8365MB_MIB_ifInBroadcastPkts]; +} + +static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port, +				     struct ethtool_eth_ctrl_stats *ctrl_stats) +{ +	struct realtek_smi *smi = ds->priv; +	struct rtl8365mb_mib_counter *mib; +	struct rtl8365mb *mb; + +	mb = smi->chip_data; +	mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes]; + +	mutex_lock(&mb->mib_lock); +	rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length, +				   &ctrl_stats->UnsupportedOpcodesReceived); +	mutex_unlock(&mb->mib_lock); +} + +static void rtl8365mb_stats_update(struct realtek_smi *smi, int port) +{ +	u64 cnt[RTL8365MB_MIB_END] = { +		[RTL8365MB_MIB_ifOutOctets] = 1, +		[RTL8365MB_MIB_ifOutUcastPkts] = 1, +		[RTL8365MB_MIB_ifOutMulticastPkts] = 1, +		[RTL8365MB_MIB_ifOutBroadcastPkts] = 1, +		[RTL8365MB_MIB_ifOutDiscards] = 1, +		[RTL8365MB_MIB_ifInOctets] = 1, +		[RTL8365MB_MIB_ifInUcastPkts] = 1, +		[RTL8365MB_MIB_ifInMulticastPkts] = 1, +		[RTL8365MB_MIB_ifInBroadcastPkts] = 1, +		[RTL8365MB_MIB_etherStatsDropEvents] = 1, +		[RTL8365MB_MIB_etherStatsCollisions] = 1, +		[RTL8365MB_MIB_etherStatsFragments] = 1, +		[RTL8365MB_MIB_etherStatsJabbers] = 1, +		[RTL8365MB_MIB_dot3StatsFCSErrors] = 1, +		[RTL8365MB_MIB_dot3StatsLateCollisions] = 1, +	}; +	struct rtl8365mb *mb = smi->chip_data; +	struct rtnl_link_stats64 *stats; +	int ret; +	int i; + +	stats = &mb->ports[port].stats; + +	mutex_lock(&mb->mib_lock); +	for (i = 0; i < RTL8365MB_MIB_END; i++) { +		struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i]; + +		/* Only fetch required MIB counters (marked = 1 above) */ +		if (!cnt[i]) +			continue; + +		ret = rtl8365mb_mib_counter_read(smi, port, c->offset, +						 c->length, &cnt[i]); +		if (ret) +			break; +	} +	mutex_unlock(&mb->mib_lock); + +	/* Don't update statistics if there was an error reading the counters */ +	if (ret) +		return; + +	
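/* Take the per-port stats lock so that a concurrent rtl8365mb_get_stats64() +	 * call sees a consistent snapshot of the counters. +	 */ +	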
spin_lock(&mb->ports[port].stats_lock); + +	stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] + +			    cnt[RTL8365MB_MIB_ifInMulticastPkts] + +			    cnt[RTL8365MB_MIB_ifInBroadcastPkts] - +			    cnt[RTL8365MB_MIB_ifOutDiscards]; + +	stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] + +			    cnt[RTL8365MB_MIB_ifOutMulticastPkts] + +			    cnt[RTL8365MB_MIB_ifOutBroadcastPkts]; + +	/* if{In,Out}Octets includes FCS - remove it */ +	stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets; +	stats->tx_bytes = +		cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets; + +	stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents]; +	stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards]; + +	stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts]; +	stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions]; + +	stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] + +				  cnt[RTL8365MB_MIB_etherStatsJabbers]; +	stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors]; +	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors; + +	stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards]; +	stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions]; +	stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors; + +	spin_unlock(&mb->ports[port].stats_lock); +} + +static void rtl8365mb_stats_poll(struct work_struct *work) +{ +	struct rtl8365mb_port *p = container_of(to_delayed_work(work), +						struct rtl8365mb_port, +						mib_work); +	struct realtek_smi *smi = p->smi; + +	rtl8365mb_stats_update(smi, p->index); + +	schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES); +} + +static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port, +				  struct rtnl_link_stats64 *s) +{ +	struct realtek_smi *smi = ds->priv; +	struct rtl8365mb_port *p; +	struct rtl8365mb *mb; + +	mb = smi->chip_data; +	p = &mb->ports[port]; + +	spin_lock(&p->stats_lock); +	memcpy(s, &p->stats, sizeof(*s)); +	spin_unlock(&p->stats_lock); +} + +static void rtl8365mb_stats_setup(struct realtek_smi *smi) +{ +	struct rtl8365mb *mb = smi->chip_data; +	int i; + +	/* Per-chip global mutex to protect MIB counter access, since doing +	 * so requires accessing a series of registers in a particular order. +	 */ +	mutex_init(&mb->mib_lock); + +	for (i = 0; i < smi->num_ports; i++) { +		struct rtl8365mb_port *p = &mb->ports[i]; + +		if (dsa_is_unused_port(smi->ds, i)) +			continue; + +		/* Per-port spinlock to protect the stats64 data */ +		spin_lock_init(&p->stats_lock); + +		/* This work polls the MIB counters and keeps the stats64 data +		 * up-to-date. 
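+		 * The work is first scheduled from rtl8365mb_phylink_mac_link_up() when +		 * the port's link comes up, and cancelled again on link down and in +		 * rtl8365mb_stats_teardown().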
+		 */ +		INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll); +	} +} + +static void rtl8365mb_stats_teardown(struct realtek_smi *smi) +{ +	struct rtl8365mb *mb = smi->chip_data; +	int i; + +	for (i = 0; i < smi->num_ports; i++) { +		struct rtl8365mb_port *p = &mb->ports[i]; + +		if (dsa_is_unused_port(smi->ds, i)) +			continue; + +		cancel_delayed_work_sync(&p->mib_work); +	} +} + +static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg, +					      u32 *val) +{ +	int ret; + +	ret = regmap_read(smi->map, reg, val); +	if (ret) +		return ret; + +	return regmap_write(smi->map, reg, *val); +} + +static irqreturn_t rtl8365mb_irq(int irq, void *data) +{ +	struct realtek_smi *smi = data; +	unsigned long line_changes = 0; +	struct rtl8365mb *mb; +	u32 stat; +	int line; +	int ret; + +	mb = smi->chip_data; + +	ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG, +						 &stat); +	if (ret) +		goto out_error; + +	if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) { +		u32 linkdown_ind; +		u32 linkup_ind; +		u32 val; + +		ret = rtl8365mb_get_and_clear_status_reg( +			smi, RTL8365MB_PORT_LINKUP_IND_REG, &val); +		if (ret) +			goto out_error; + +		linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val); + +		ret = rtl8365mb_get_and_clear_status_reg( +			smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val); +		if (ret) +			goto out_error; + +		linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val); + +		line_changes = (linkup_ind | linkdown_ind) & mb->port_mask; +	} + +	if (!line_changes) +		goto out_none; + +	for_each_set_bit(line, &line_changes, smi->num_ports) { +		int child_irq = irq_find_mapping(smi->irqdomain, line); + +		handle_nested_irq(child_irq); +	} + +	return IRQ_HANDLED; + +out_error: +	dev_err(smi->dev, "failed to read interrupt status: %d\n", ret); + +out_none: +	return IRQ_NONE; +} + +static struct irq_chip rtl8365mb_irq_chip = { +	.name = "rtl8365mb", +	/* The hardware doesn't support masking IRQs on a per-port basis */ +}; + +static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq, +			     irq_hw_number_t hwirq) +{ +	irq_set_chip_data(irq, domain->host_data); +	irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq); +	irq_set_nested_thread(irq, 1); +	irq_set_noprobe(irq); + +	return 0; +} + +static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq) +{ +	irq_set_nested_thread(irq, 0); +	irq_set_chip_and_handler(irq, NULL, NULL); +	irq_set_chip_data(irq, NULL); +} + +static const struct irq_domain_ops rtl8365mb_irqdomain_ops = { +	.map = rtl8365mb_irq_map, +	.unmap = rtl8365mb_irq_unmap, +	.xlate = irq_domain_xlate_onecell, +}; + +static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable) +{ +	return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG, +				  RTL8365MB_INTR_LINK_CHANGE_MASK, +				  FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK, +					     enable ? 
1 : 0)); +} + +static int rtl8365mb_irq_enable(struct realtek_smi *smi) +{ +	return rtl8365mb_set_irq_enable(smi, true); +} + +static int rtl8365mb_irq_disable(struct realtek_smi *smi) +{ +	return rtl8365mb_set_irq_enable(smi, false); +} + +static int rtl8365mb_irq_setup(struct realtek_smi *smi) +{ +	struct rtl8365mb *mb = smi->chip_data; +	struct device_node *intc; +	u32 irq_trig; +	int virq; +	int irq; +	u32 val; +	int ret; +	int i; + +	intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller"); +	if (!intc) { +		dev_err(smi->dev, "missing child interrupt-controller node\n"); +		return -EINVAL; +	} + +	/* rtl8365mb IRQs cascade off this one */ +	irq = of_irq_get(intc, 0); +	if (irq <= 0) { +		if (irq != -EPROBE_DEFER) +			dev_err(smi->dev, "failed to get parent irq: %d\n", +				irq); +		ret = irq ? irq : -EINVAL; +		goto out_put_node; +	} + +	smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports, +					       &rtl8365mb_irqdomain_ops, smi); +	if (!smi->irqdomain) { +		dev_err(smi->dev, "failed to add irq domain\n"); +		ret = -ENOMEM; +		goto out_put_node; +	} + +	for (i = 0; i < smi->num_ports; i++) { +		virq = irq_create_mapping(smi->irqdomain, i); +		if (!virq) { +			dev_err(smi->dev, +				"failed to create irq domain mapping\n"); +			ret = -EINVAL; +			goto out_remove_irqdomain; +		} + +		irq_set_parent(virq, irq); +	} + +	/* Configure chip interrupt signal polarity */ +	irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); +	switch (irq_trig) { +	case IRQF_TRIGGER_RISING: +	case IRQF_TRIGGER_HIGH: +		val = RTL8365MB_INTR_POLARITY_HIGH; +		break; +	case IRQF_TRIGGER_FALLING: +	case IRQF_TRIGGER_LOW: +		val = RTL8365MB_INTR_POLARITY_LOW; +		break; +	default: +		dev_err(smi->dev, "unsupported irq trigger type %u\n", +			irq_trig); +		ret = -EINVAL; +		goto out_remove_irqdomain; +	} + +	ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG, +				 RTL8365MB_INTR_POLARITY_MASK, +				 FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val)); +	if (ret) +		goto out_remove_irqdomain; + +	/* Disable the interrupt in case the chip has it enabled on reset */ +	ret = rtl8365mb_irq_disable(smi); +	if (ret) +		goto out_remove_irqdomain; + +	/* Clear the interrupt status register */ +	ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG, +			   RTL8365MB_INTR_ALL_MASK); +	if (ret) +		goto out_remove_irqdomain; + +	ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT, +				   "rtl8365mb", smi); +	if (ret) { +		dev_err(smi->dev, "failed to request irq: %d\n", ret); +		goto out_remove_irqdomain; +	} + +	/* Store the irq so that we know to free it during teardown */ +	mb->irq = irq; + +	ret = rtl8365mb_irq_enable(smi); +	if (ret) +		goto out_free_irq; + +	of_node_put(intc); + +	return 0; + +out_free_irq: +	free_irq(mb->irq, smi); +	mb->irq = 0; + +out_remove_irqdomain: +	for (i = 0; i < smi->num_ports; i++) { +		virq = irq_find_mapping(smi->irqdomain, i); +		irq_dispose_mapping(virq); +	} + +	irq_domain_remove(smi->irqdomain); +	smi->irqdomain = NULL; + +out_put_node: +	of_node_put(intc); + +	return ret; +} + +static void rtl8365mb_irq_teardown(struct realtek_smi *smi) +{ +	struct rtl8365mb *mb = smi->chip_data; +	int virq; +	int i; + +	if (mb->irq) { +		free_irq(mb->irq, smi); +		mb->irq = 0; +	} + +	if (smi->irqdomain) { +		for (i = 0; i < smi->num_ports; i++) { +			virq = irq_find_mapping(smi->irqdomain, i); +			irq_dispose_mapping(virq); +		} + +		irq_domain_remove(smi->irqdomain); +		smi->irqdomain = NULL; +	} +} + +static int rtl8365mb_cpu_config(struct 
realtek_smi *smi) +{ +	struct rtl8365mb *mb = smi->chip_data; +	struct rtl8365mb_cpu *cpu = &mb->cpu; +	u32 val; +	int ret; + +	ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG, +				 RTL8365MB_CPU_PORT_MASK_MASK, +				 FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK, +					    cpu->mask)); +	if (ret) +		return ret; + +	val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) | +	      FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) | +	      FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) | +	      FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) | +	      FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) | +	      FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) | +	      FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK, +			 cpu->trap_port >> 3); +	ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val); +	if (ret) +		return ret; + +	return 0; +} + +static int rtl8365mb_switch_init(struct realtek_smi *smi) +{ +	struct rtl8365mb *mb = smi->chip_data; +	int ret; +	int i; + +	/* Do any chip-specific init jam before getting to the common stuff */ +	if (mb->jam_table) { +		for (i = 0; i < mb->jam_size; i++) { +			ret = regmap_write(smi->map, mb->jam_table[i].reg, +					   mb->jam_table[i].val); +			if (ret) +				return ret; +		} +	} + +	/* Common init jam */ +	for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) { +		ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg, +				   rtl8365mb_init_jam_common[i].val); +		if (ret) +			return ret; +	} + +	return 0; +} + +static int rtl8365mb_reset_chip(struct realtek_smi *smi) +{ +	u32 val; + +	realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG, +				    FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, +					       1)); + +	/* Realtek documentation says the chip needs 1 second to reset. Sleep +	 * for 100 ms before accessing any registers to prevent ACK timeouts. +	 */ +	msleep(100); +	return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val, +					!(val & RTL8365MB_CHIP_RESET_HW_MASK), +					20000, 1e6); +} + +static int rtl8365mb_setup(struct dsa_switch *ds) +{ +	struct realtek_smi *smi = ds->priv; +	struct rtl8365mb *mb; +	int ret; +	int i; + +	mb = smi->chip_data; + +	ret = rtl8365mb_reset_chip(smi); +	if (ret) { +		dev_err(smi->dev, "failed to reset chip: %d\n", ret); +		goto out_error; +	} + +	/* Configure switch to vendor-defined initial state */ +	ret = rtl8365mb_switch_init(smi); +	if (ret) { +		dev_err(smi->dev, "failed to initialize switch: %d\n", ret); +		goto out_error; +	} + +	/* Set up cascading IRQs */ +	ret = rtl8365mb_irq_setup(smi); +	if (ret == -EPROBE_DEFER) +		return ret; +	else if (ret) +		dev_info(smi->dev, "no interrupt support\n"); + +	/* Configure CPU tagging */ +	ret = rtl8365mb_cpu_config(smi); +	if (ret) +		goto out_teardown_irq; + +	/* Configure ports */ +	for (i = 0; i < smi->num_ports; i++) { +		struct rtl8365mb_port *p = &mb->ports[i]; + +		if (dsa_is_unused_port(smi->ds, i)) +			continue; + +		/* Set up per-port private data */ +		p->smi = smi; +		p->index = i; + +		/* Forward only to the CPU */ +		ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port)); +		if (ret) +			goto out_teardown_irq; + +		/* Disable learning */ +		ret = rtl8365mb_port_set_learning(smi, i, false); +		if (ret) +			goto out_teardown_irq; + +		/* Set the initial STP state of all ports to DISABLED, otherwise +		 * ports will still forward frames to the CPU despite being +		 * administratively down by default. 
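+		 * The DSA core is expected to move each port to its proper STP state +		 * later on, via the .port_stp_state_set callback, once the port is +		 * enabled or joins a bridge.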
+		 */ +		rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED); +	} + +	/* Set maximum packet length to 1536 bytes */ +	ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG, +				 RTL8365MB_CFG0_MAX_LEN_MASK, +				 FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536)); +	if (ret) +		goto out_teardown_irq; + +	ret = realtek_smi_setup_mdio(smi); +	if (ret) { +		dev_err(smi->dev, "could not set up MDIO bus\n"); +		goto out_teardown_irq; +	} + +	/* Start statistics counter polling */ +	rtl8365mb_stats_setup(smi); + +	return 0; + +out_teardown_irq: +	rtl8365mb_irq_teardown(smi); + +out_error: +	return ret; +} + +static void rtl8365mb_teardown(struct dsa_switch *ds) +{ +	struct realtek_smi *smi = ds->priv; + +	rtl8365mb_stats_teardown(smi); +	rtl8365mb_irq_teardown(smi); +} + +static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver) +{ +	int ret; + +	/* For some reason we have to write a magic value to an arbitrary +	 * register whenever accessing the chip ID/version registers. +	 */ +	ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE); +	if (ret) +		return ret; + +	ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id); +	if (ret) +		return ret; + +	ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver); +	if (ret) +		return ret; + +	/* Reset magic register */ +	ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0); +	if (ret) +		return ret; + +	return 0; +} + +static int rtl8365mb_detect(struct realtek_smi *smi) +{ +	struct rtl8365mb *mb = smi->chip_data; +	u32 chip_id; +	u32 chip_ver; +	int ret; + +	ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver); +	if (ret) { +		dev_err(smi->dev, "failed to read chip id and version: %d\n", +			ret); +		return ret; +	} + +	switch (chip_id) { +	case RTL8365MB_CHIP_ID_8365MB_VC: +		dev_info(smi->dev, +			 "found an RTL8365MB-VC switch (ver=0x%04x)\n", +			 chip_ver); + +		smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC; +		smi->num_ports = smi->cpu_port + 1; + +		mb->smi = smi; +		mb->chip_id = chip_id; +		mb->chip_ver = chip_ver; +		mb->port_mask = BIT(smi->num_ports) - 1; +		mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC; +		mb->jam_table = rtl8365mb_init_jam_8365mb_vc; +		mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc); + +		mb->cpu.enable = 1; +		mb->cpu.mask = BIT(smi->cpu_port); +		mb->cpu.trap_port = smi->cpu_port; +		mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL; +		mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA; +		mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES; +		mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES; + +		break; +	default: +		dev_err(smi->dev, +			"found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n", +			chip_id, chip_ver); +		return -ENODEV; +	} + +	return 0; +} + +static const struct dsa_switch_ops rtl8365mb_switch_ops = { +	.get_tag_protocol = rtl8365mb_get_tag_protocol, +	.setup = rtl8365mb_setup, +	.teardown = rtl8365mb_teardown, +	.phylink_validate = rtl8365mb_phylink_validate, +	.phylink_mac_config = rtl8365mb_phylink_mac_config, +	.phylink_mac_link_down = rtl8365mb_phylink_mac_link_down, +	.phylink_mac_link_up = rtl8365mb_phylink_mac_link_up, +	.port_stp_state_set = rtl8365mb_port_stp_state_set, +	.get_strings = rtl8365mb_get_strings, +	.get_ethtool_stats = rtl8365mb_get_ethtool_stats, +	.get_sset_count = rtl8365mb_get_sset_count, +	.get_eth_phy_stats = rtl8365mb_get_phy_stats, +	.get_eth_mac_stats = rtl8365mb_get_mac_stats, +	.get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats, +	.get_stats64 = rtl8365mb_get_stats64, +}; + +static const struct realtek_smi_ops 
rtl8365mb_smi_ops = { +	.detect = rtl8365mb_detect, +	.phy_read = rtl8365mb_phy_read, +	.phy_write = rtl8365mb_phy_write, +}; + +const struct realtek_smi_variant rtl8365mb_variant = { +	.ds_ops = &rtl8365mb_switch_ops, +	.ops = &rtl8365mb_smi_ops, +	.clk_delay = 10, +	.cmd_read = 0xb9, +	.cmd_write = 0xb8, +	.chip_data_sz = sizeof(struct rtl8365mb), +}; +EXPORT_SYMBOL_GPL(rtl8365mb_variant); diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 75897a369096..bdb8d8d34880 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -292,89 +292,6 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)  }  EXPORT_SYMBOL_GPL(rtl8366_reset_vlan); -int rtl8366_init_vlan(struct realtek_smi *smi) -{ -	int port; -	int ret; - -	ret = rtl8366_reset_vlan(smi); -	if (ret) -		return ret; - -	/* Loop over the available ports, for each port, associate -	 * it with the VLAN (port+1) -	 */ -	for (port = 0; port < smi->num_ports; port++) { -		u32 mask; - -		if (port == smi->cpu_port) -			/* For the CPU port, make all ports members of this -			 * VLAN. -			 */ -			mask = GENMASK((int)smi->num_ports - 1, 0); -		else -			/* For all other ports, enable itself plus the -			 * CPU port. -			 */ -			mask = BIT(port) | BIT(smi->cpu_port); - -		/* For each port, set the port as member of VLAN (port+1) -		 * and untagged, except for the CPU port: the CPU port (5) is -		 * member of VLAN 6 and so are ALL the other ports as well. -		 * Use filter 0 (no filter). -		 */ -		dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n", -			 (port + 1), port, mask); -		ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0); -		if (ret) -			return ret; - -		dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n", -			 (port + 1), port, (port + 1)); -		ret = rtl8366_set_pvid(smi, port, (port + 1)); -		if (ret) -			return ret; -	} - -	return rtl8366_enable_vlan(smi, true); -} -EXPORT_SYMBOL_GPL(rtl8366_init_vlan); - -int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -			   struct netlink_ext_ack *extack) -{ -	struct realtek_smi *smi = ds->priv; -	struct rtl8366_vlan_4k vlan4k; -	int ret; - -	/* Use VLAN nr port + 1 since VLAN0 is not valid */ -	if (!smi->ops->is_vlan_valid(smi, port + 1)) -		return -EINVAL; - -	dev_info(smi->dev, "%s filtering on port %d\n", -		 vlan_filtering ? "enable" : "disable", -		 port); - -	/* TODO: -	 * The hardware support filter ID (FID) 0..7, I have no clue how to -	 * support this in the driver when the callback only says on/off. -	 */ -	ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k); -	if (ret) -		return ret; - -	/* Just set the filter to FID 1 for now then */ -	ret = rtl8366_set_vlan(smi, port + 1, -			       vlan4k.member, -			       vlan4k.untag, -			       1); -	if (ret) -		return ret; - -	return 0; -} -EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering); -  int rtl8366_vlan_add(struct dsa_switch *ds, int port,  		     const struct switchdev_obj_port_vlan *vlan,  		     struct netlink_ext_ack *extack) @@ -401,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,  		return ret;  	} -	dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n", -		 vlan->vid, port, untagged ? "untagged" : "tagged", -		 pvid ? " PVID" : "no PVID"); - -	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) -		dev_err(smi->dev, "port is DSA or CPU port\n"); +	dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n", +		vlan->vid, port, untagged ? "untagged" : "tagged", +		pvid ? 
"PVID" : "no PVID");  	member |= BIT(port); @@ -439,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,  	struct realtek_smi *smi = ds->priv;  	int ret, i; -	dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port); +	dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);  	for (i = 0; i < smi->num_vlan_mc; i++) {  		struct rtl8366_vlan_mc vlanmc; @@ -457,7 +371,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,  			 * anymore then clear the whole member  			 * config so it can be reused.  			 */ -			if (!vlanmc.member && vlanmc.untag) { +			if (!vlanmc.member) {  				vlanmc.vid = 0;  				vlanmc.priority = 0;  				vlanmc.fid = 0; diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index a89093bc6c6a..03deacd83e61 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -14,6 +14,7 @@  #include <linux/bitops.h>  #include <linux/etherdevice.h> +#include <linux/if_bridge.h>  #include <linux/interrupt.h>  #include <linux/irqdomain.h>  #include <linux/irqchip/chained_irq.h> @@ -42,9 +43,12 @@  /* Port Enable Control register */  #define RTL8366RB_PECR				0x0001 -/* Switch Security Control registers */ -#define RTL8366RB_SSCR0				0x0002 -#define RTL8366RB_SSCR1				0x0003 +/* Switch per-port learning disablement register */ +#define RTL8366RB_PORT_LEARNDIS_CTRL		0x0002 + +/* Security control, actually aging register */ +#define RTL8366RB_SECURITY_CTRL			0x0003 +  #define RTL8366RB_SSCR2				0x0004  #define RTL8366RB_SSCR2_DROP_UNKNOWN_DA		BIT(0) @@ -106,6 +110,18 @@  #define RTL8366RB_POWER_SAVING_REG	0x0021 +/* Spanning tree status (STP) control, two bits per port per FID */ +#define RTL8366RB_STP_STATE_BASE	0x0050 /* 0x0050..0x0057 */ +#define RTL8366RB_STP_STATE_DISABLED	0x0 +#define RTL8366RB_STP_STATE_BLOCKING	0x1 +#define RTL8366RB_STP_STATE_LEARNING	0x2 +#define RTL8366RB_STP_STATE_FORWARDING	0x3 +#define RTL8366RB_STP_MASK		GENMASK(1, 0) +#define RTL8366RB_STP_STATE(port, state) \ +	((state) << ((port) * 2)) +#define RTL8366RB_STP_STATE_MASK(port) \ +	RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK) +  /* CPU port control reg */  #define RTL8368RB_CPU_CTRL_REG		0x0061  #define RTL8368RB_CPU_PORTS_MSK		0x00FF @@ -143,6 +159,21 @@  #define RTL8366RB_PHY_NO_OFFSET			9  #define RTL8366RB_PHY_NO_MASK			(0x1f << 9) +/* VLAN Ingress Control Register 1, one bit per port. + * bit 0 .. 5 will make the switch drop ingress frames without + * VID such as untagged or priority-tagged frames for respective + * port. + * bit 6 .. 11 will make the switch drop ingress frames carrying + * a C-tag with VID != 0 for respective port. + */ +#define RTL8366RB_VLAN_INGRESS_CTRL1_REG	0x037E +#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port)	(BIT((port)) | BIT((port) + 6)) + +/* VLAN Ingress Control Register 2, one bit per port. + * bit0 .. bit5 will make the switch drop all ingress frames with + * a VLAN classification that does not include the port is in its + * member set. 
+ */  #define RTL8366RB_VLAN_INGRESS_CTRL2_REG	0x037f  /* LED control registers */ @@ -215,6 +246,7 @@  #define RTL8366RB_NUM_LEDGROUPS		4  #define RTL8366RB_NUM_VIDS		4096  #define RTL8366RB_PRIORITYMAX		7 +#define RTL8366RB_NUM_FIDS		8  #define RTL8366RB_FIDMAX		7  #define RTL8366RB_PORT_1		BIT(0) /* In userspace port 0 */ @@ -300,6 +332,13 @@  #define RTL8366RB_INTERRUPT_STATUS_REG	0x0442  #define RTL8366RB_NUM_INTERRUPT		14 /* 0..13 */ +/* Port isolation registers */ +#define RTL8366RB_PORT_ISO_BASE		0x0F08 +#define RTL8366RB_PORT_ISO(pnum)	(RTL8366RB_PORT_ISO_BASE + (pnum)) +#define RTL8366RB_PORT_ISO_EN		BIT(0) +#define RTL8366RB_PORT_ISO_PORTS_MASK	GENMASK(7, 1) +#define RTL8366RB_PORT_ISO_PORTS(pmask)	((pmask) << 1) +  /* bits 0..5 enable force when cleared */  #define RTL8366RB_MAC_FORCE_CTRL_REG	0x0F11 @@ -314,9 +353,11 @@  /**   * struct rtl8366rb - RTL8366RB-specific data   * @max_mtu: per-port max MTU setting + * @pvid_enabled: if PVID is set for respective port   */  struct rtl8366rb {  	unsigned int max_mtu[RTL8366RB_NUM_PORTS]; +	bool pvid_enabled[RTL8366RB_NUM_PORTS];  };  static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { @@ -835,6 +876,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)  	if (ret)  		return ret; +	/* Isolate all user ports so they can only send packets to itself and the CPU port */ +	for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { +		ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i), +				   RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) | +				   RTL8366RB_PORT_ISO_EN); +		if (ret) +			return ret; +	} +	/* CPU port can send packets to all ports */ +	ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU), +			   RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) | +			   RTL8366RB_PORT_ISO_EN); +	if (ret) +		return ret; +  	/* Set up the "green ethernet" feature */  	ret = rtl8366rb_jam_table(rtl8366rb_green_jam,  				  ARRAY_SIZE(rtl8366rb_green_jam), smi, false); @@ -888,13 +944,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds)  		/* layer 2 size, see rtl8366rb_change_mtu() */  		rb->max_mtu[i] = 1532; -	/* Enable learning for all ports */ -	ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0); +	/* Disable learning for all ports */ +	ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL, +			   RTL8366RB_PORT_ALL);  	if (ret)  		return ret;  	/* Enable auto ageing for all ports */ -	ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0); +	ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);  	if (ret)  		return ret; @@ -911,11 +968,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)  	if (ret)  		return ret; -	/* Discard VLAN tagged packets if the port is not a member of -	 * the VLAN with which the packets is associated. 
-	 */ +	/* Accept all packets by default, we enable filtering on-demand */ +	ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG, +			   0); +	if (ret) +		return ret;  	ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, -			   RTL8366RB_PORT_ALL); +			   0);  	if (ret)  		return ret; @@ -963,7 +1022,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)  			return ret;  	} -	ret = rtl8366_init_vlan(smi); +	ret = rtl8366_reset_vlan(smi);  	if (ret)  		return ret; @@ -977,8 +1036,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)  		return -ENODEV;  	} -	ds->configure_vlan_while_not_filtering = false; -  	return 0;  } @@ -1127,6 +1184,190 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)  	rb8366rb_set_port_led(smi, port, false);  } +static int +rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, +			   struct net_device *bridge) +{ +	struct realtek_smi *smi = ds->priv; +	unsigned int port_bitmap = 0; +	int ret, i; + +	/* Loop over all other ports than the current one */ +	for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { +		/* Current port handled last */ +		if (i == port) +			continue; +		/* Not on this bridge */ +		if (dsa_to_port(ds, i)->bridge_dev != bridge) +			continue; +		/* Join this port to each other port on the bridge */ +		ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), +					 RTL8366RB_PORT_ISO_PORTS(BIT(port)), +					 RTL8366RB_PORT_ISO_PORTS(BIT(port))); +		if (ret) +			dev_err(smi->dev, "failed to join port %d\n", port); + +		port_bitmap |= BIT(i); +	} + +	/* Set the bits for the ports we can access */ +	return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port), +				  RTL8366RB_PORT_ISO_PORTS(port_bitmap), +				  RTL8366RB_PORT_ISO_PORTS(port_bitmap)); +} + +static void +rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port, +			    struct net_device *bridge) +{ +	struct realtek_smi *smi = ds->priv; +	unsigned int port_bitmap = 0; +	int ret, i; + +	/* Loop over all other ports than this one */ +	for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { +		/* Current port handled last */ +		if (i == port) +			continue; +		/* Not on this bridge */ +		if (dsa_to_port(ds, i)->bridge_dev != bridge) +			continue; +		/* Remove this port from any other port on the bridge */ +		ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), +					 RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0); +		if (ret) +			dev_err(smi->dev, "failed to leave port %d\n", port); + +		port_bitmap |= BIT(i); +	} + +	/* Clear the bits for the ports we can not access, leave ourselves */ +	regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port), +			   RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0); +} + +/** + * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames + * @smi: SMI state container + * @port: the port to drop untagged and C-tagged frames on + * @drop: whether to drop or pass untagged and C-tagged frames + */ +static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop) +{ +	return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG, +				  RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port), +				  drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0); +} + +static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port, +				    bool vlan_filtering, +				    struct netlink_ext_ack *extack) +{ +	struct realtek_smi *smi = ds->priv; +	struct rtl8366rb *rb; +	int ret; + +	rb = smi->chip_data; + +	dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port, +		vlan_filtering ? 
"enable" : "disable"); + +	/* If the port is not in the member set, the frame will be dropped */ +	ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, +				 BIT(port), vlan_filtering ? BIT(port) : 0); +	if (ret) +		return ret; + +	/* If VLAN filtering is enabled and PVID is also enabled, we must +	 * not drop any untagged or C-tagged frames. If we turn off VLAN +	 * filtering on a port, we need to accept any frames. +	 */ +	if (vlan_filtering) +		ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]); +	else +		ret = rtl8366rb_drop_untagged(smi, port, false); + +	return ret; +} + +static int +rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port, +				struct switchdev_brport_flags flags, +				struct netlink_ext_ack *extack) +{ +	/* We support enabling/disabling learning */ +	if (flags.mask & ~(BR_LEARNING)) +		return -EINVAL; + +	return 0; +} + +static int +rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port, +			    struct switchdev_brport_flags flags, +			    struct netlink_ext_ack *extack) +{ +	struct realtek_smi *smi = ds->priv; +	int ret; + +	if (flags.mask & BR_LEARNING) { +		ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL, +					 BIT(port), +					 (flags.val & BR_LEARNING) ? 0 : BIT(port)); +		if (ret) +			return ret; +	} + +	return 0; +} + +static void +rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ +	struct realtek_smi *smi = ds->priv; +	u32 val; +	int i; + +	switch (state) { +	case BR_STATE_DISABLED: +		val = RTL8366RB_STP_STATE_DISABLED; +		break; +	case BR_STATE_BLOCKING: +	case BR_STATE_LISTENING: +		val = RTL8366RB_STP_STATE_BLOCKING; +		break; +	case BR_STATE_LEARNING: +		val = RTL8366RB_STP_STATE_LEARNING; +		break; +	case BR_STATE_FORWARDING: +		val = RTL8366RB_STP_STATE_FORWARDING; +		break; +	default: +		dev_err(smi->dev, "unknown bridge state requested\n"); +		return; +	} + +	/* Set the same status for the port on all the FIDs */ +	for (i = 0; i < RTL8366RB_NUM_FIDS; i++) { +		regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i, +				   RTL8366RB_STP_STATE_MASK(port), +				   RTL8366RB_STP_STATE(port, val)); +	} +} + +static void +rtl8366rb_port_fast_age(struct dsa_switch *ds, int port) +{ +	struct realtek_smi *smi = ds->priv; + +	/* This will age out any learned L2 entries */ +	regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL, +			   BIT(port), BIT(port)); +	/* Restore the normal state of things */ +	regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL, +			   BIT(port), 0); +} +  static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)  {  	struct realtek_smi *smi = ds->priv; @@ -1338,24 +1579,44 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)  static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)  { +	struct rtl8366rb *rb; +	bool pvid_enabled; +	int ret; + +	rb = smi->chip_data; +	pvid_enabled = !!index; +  	if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)  		return -EINVAL; -	return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), +	ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),  				RTL8366RB_PORT_VLAN_CTRL_MASK <<  					RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),  				(index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<  					RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)); +	if (ret) +		return ret; + +	rb->pvid_enabled[port] = pvid_enabled; + +	/* If VLAN filtering is enabled and PVID is also enabled, we must +	 * not drop any untagged or C-tagged frames. 
Make sure to update the +	 * filtering setting. +	 */ +	if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port))) +		ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled); + +	return ret;  }  static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)  { -	unsigned int max = RTL8366RB_NUM_VLANS; +	unsigned int max = RTL8366RB_NUM_VLANS - 1;  	if (smi->vlan4k_enabled)  		max = RTL8366RB_NUM_VIDS - 1; -	if (vlan == 0 || vlan > max) +	if (vlan > max)  		return false;  	return true; @@ -1510,11 +1771,17 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {  	.get_strings = rtl8366_get_strings,  	.get_ethtool_stats = rtl8366_get_ethtool_stats,  	.get_sset_count = rtl8366_get_sset_count, -	.port_vlan_filtering = rtl8366_vlan_filtering, +	.port_bridge_join = rtl8366rb_port_bridge_join, +	.port_bridge_leave = rtl8366rb_port_bridge_leave, +	.port_vlan_filtering = rtl8366rb_vlan_filtering,  	.port_vlan_add = rtl8366_vlan_add,  	.port_vlan_del = rtl8366_vlan_del,  	.port_enable = rtl8366rb_port_enable,  	.port_disable = rtl8366rb_port_disable, +	.port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags, +	.port_bridge_flags = rtl8366rb_port_bridge_flags, +	.port_stp_state_set = rtl8366rb_port_stp_state_set, +	.port_fast_age = rtl8366rb_port_fast_age,  	.port_change_mtu = rtl8366rb_change_mtu,  	.port_max_mtu = rtl8366rb_max_mtu,  }; diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 5e5d24e7c02b..21dba16af097 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -20,6 +20,27 @@  #define SJA1105_AGEING_TIME_MS(ms)	((ms) / 10)  #define SJA1105_NUM_L2_POLICERS		SJA1110_MAX_L2_POLICING_COUNT +/* Calculated assuming 1Gbps, where the clock has 125 MHz (8 ns period) + * To avoid floating point operations, we'll multiply the degrees by 10 + * to get a "phase" and get 1 decimal point precision. + */ +#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \ +	(((ps) * 360) / 800) +#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \ +	((800 * (phase)) / 360) +#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \ +	(((phase) - 738) / 9) +#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \ +	SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps)) + +/* Valid range in degrees is a value between 73.8 and 101.7 + * in 0.9 degree increments + */ +#define SJA1105_RGMII_DELAY_MIN_PS \ +	SJA1105_RGMII_DELAY_PHASE_TO_PS(738) +#define SJA1105_RGMII_DELAY_MAX_PS \ +	SJA1105_RGMII_DELAY_PHASE_TO_PS(1017) +  typedef enum {  	SPI_READ = 0,  	SPI_WRITE = 1, @@ -222,16 +243,14 @@ struct sja1105_flow_block {  struct sja1105_private {  	struct sja1105_static_config static_config; -	bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS]; -	bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS]; +	int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS]; +	int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS];  	phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];  	bool fixed_link[SJA1105_MAX_NUM_PORTS]; -	bool vlan_aware;  	unsigned long ucast_egress_floods;  	unsigned long bcast_egress_floods;  	const struct sja1105_info *info;  	size_t max_xfer_len; -	struct gpio_desc *reset_gpio;  	struct spi_device *spidev;  	struct dsa_switch *ds;  	u16 bridge_pvid[SJA1105_MAX_NUM_PORTS]; @@ -242,6 +261,8 @@ struct sja1105_private {  	 * the switch doesn't confuse them with one another.  	 
*/  	struct mutex mgmt_lock; +	/* Serializes access to the dynamic config interface */ +	struct mutex dynamic_config_lock;  	struct devlink_region **regions;  	struct sja1105_cbs_entry *cbs;  	struct mii_bus *mdio_base_t1; diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c index 5bbf1707f2af..e3699f76f6d7 100644 --- a/drivers/net/dsa/sja1105/sja1105_clocking.c +++ b/drivers/net/dsa/sja1105/sja1105_clocking.c @@ -498,17 +498,6 @@ sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,  	sja1105_packing(buf, &cmd->txc_pd,          0,  0, size, op);  } -/* Valid range in degrees is an integer between 73.8 and 101.7 */ -static u64 sja1105_rgmii_delay(u64 phase) -{ -	/* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9. -	 * To avoid floating point operations we'll multiply by 10 -	 * and get 1 decimal point precision. -	 */ -	phase *= 10; -	return (phase - 738) / 9; -} -  /* The RGMII delay setup procedure is 2-step and gets called upon each   * .phylink_mac_config. Both are strategic.   * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues @@ -521,13 +510,15 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)  	const struct sja1105_private *priv = ctx;  	const struct sja1105_regs *regs = priv->info->regs;  	struct sja1105_cfg_pad_mii_id pad_mii_id = {0}; +	int rx_delay = priv->rgmii_rx_delay_ps[port]; +	int tx_delay = priv->rgmii_tx_delay_ps[port];  	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};  	int rc; -	if (priv->rgmii_rx_delay[port]) -		pad_mii_id.rxc_delay = sja1105_rgmii_delay(90); -	if (priv->rgmii_tx_delay[port]) -		pad_mii_id.txc_delay = sja1105_rgmii_delay(90); +	if (rx_delay) +		pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay); +	if (tx_delay) +		pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);  	/* Stage 1: Turn the RGMII delay lines off. */  	pad_mii_id.rxc_bypass = 1; @@ -542,11 +533,11 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)  		return rc;  	/* Stage 2: Turn the RGMII delay lines on. 
*/ -	if (priv->rgmii_rx_delay[port]) { +	if (rx_delay) {  		pad_mii_id.rxc_bypass = 0;  		pad_mii_id.rxc_pd = 0;  	} -	if (priv->rgmii_tx_delay[port]) { +	if (tx_delay) {  		pad_mii_id.txc_bypass = 0;  		pad_mii_id.txc_pd = 0;  	} @@ -561,20 +552,22 @@ int sja1110_setup_rgmii_delay(const void *ctx, int port)  	const struct sja1105_private *priv = ctx;  	const struct sja1105_regs *regs = priv->info->regs;  	struct sja1105_cfg_pad_mii_id pad_mii_id = {0}; +	int rx_delay = priv->rgmii_rx_delay_ps[port]; +	int tx_delay = priv->rgmii_tx_delay_ps[port];  	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};  	pad_mii_id.rxc_pd = 1;  	pad_mii_id.txc_pd = 1; -	if (priv->rgmii_rx_delay[port]) { -		pad_mii_id.rxc_delay = sja1105_rgmii_delay(90); +	if (rx_delay) { +		pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);  		/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */  		pad_mii_id.rxc_bypass = 1;  		pad_mii_id.rxc_pd = 0;  	} -	if (priv->rgmii_tx_delay[port]) { -		pad_mii_id.txc_delay = sja1105_rgmii_delay(90); +	if (tx_delay) { +		pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);  		pad_mii_id.txc_bypass = 1;  		pad_mii_id.txc_pd = 0;  	} diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index f2049f52833c..7729d3f8b7f5 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -1170,6 +1170,56 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {  	},  }; +#define SJA1105_DYNAMIC_CONFIG_SLEEP_US		10 +#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US	100000 + +static int +sja1105_dynamic_config_poll_valid(struct sja1105_private *priv, +				  struct sja1105_dyn_cmd *cmd, +				  const struct sja1105_dynamic_table_ops *ops) +{ +	u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {}; +	int rc; + +	/* We don't _need_ to read the full entry, just the command area which +	 * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a +	 * buffer that contains the full entry too. Additionally, our API +	 * doesn't really know how many bytes into the buffer does the command +	 * area really begin. So just read back the whole entry. +	 */ +	rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf, +			      ops->packed_size); +	if (rc) +		return rc; + +	/* Unpack the command structure, and return it to the caller in case it +	 * needs to perform further checks on it (VALIDENT). +	 */ +	memset(cmd, 0, sizeof(*cmd)); +	ops->cmd_packing(packed_buf, cmd, UNPACK); + +	/* Hardware hasn't cleared VALID => still working on it */ +	return cmd->valid ? -EAGAIN : 0; +} + +/* Poll the dynamic config entry's control area until the hardware has + * cleared the VALID bit, which means we have confirmation that it has + * finished processing the command. + */ +static int +sja1105_dynamic_config_wait_complete(struct sja1105_private *priv, +				     struct sja1105_dyn_cmd *cmd, +				     const struct sja1105_dynamic_table_ops *ops) +{ +	int rc; + +	return read_poll_timeout(sja1105_dynamic_config_poll_valid, +				 rc, rc != -EAGAIN, +				 SJA1105_DYNAMIC_CONFIG_SLEEP_US, +				 SJA1105_DYNAMIC_CONFIG_TIMEOUT_US, +				 false, priv, cmd, ops); +} +  /* Provides read access to the settings through the dynamic interface   * of the switch.   * @blk_idx	is used as key to select from the sja1105_dynamic_table_ops. 
@@ -1196,7 +1246,6 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,  	struct sja1105_dyn_cmd cmd = {0};  	/* SPI payload buffer */  	u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0}; -	int retries = 3;  	int rc;  	if (blk_idx >= BLK_IDX_MAX_DYN) @@ -1234,33 +1283,21 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,  		ops->entry_packing(packed_buf, entry, PACK);  	/* Send SPI write operation: read config table entry */ +	mutex_lock(&priv->dynamic_config_lock);  	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,  			      ops->packed_size); -	if (rc < 0) +	if (rc < 0) { +		mutex_unlock(&priv->dynamic_config_lock);  		return rc; +	} -	/* Loop until we have confirmation that hardware has finished -	 * processing the command and has cleared the VALID field -	 */ -	do { -		memset(packed_buf, 0, ops->packed_size); - -		/* Retrieve the read operation's result */ -		rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf, -				      ops->packed_size); -		if (rc < 0) -			return rc; - -		cmd = (struct sja1105_dyn_cmd) {0}; -		ops->cmd_packing(packed_buf, &cmd, UNPACK); - -		if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY)) -			return -ENOENT; -		cpu_relax(); -	} while (cmd.valid && --retries); +	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops); +	mutex_unlock(&priv->dynamic_config_lock); +	if (rc < 0) +		return rc; -	if (cmd.valid) -		return -ETIMEDOUT; +	if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY)) +		return -ENOENT;  	/* Don't dereference possibly NULL pointer - maybe caller  	 * only wanted to see whether the entry existed or not. @@ -1316,8 +1353,16 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,  		ops->entry_packing(packed_buf, entry, PACK);  	/* Send SPI write operation: read config table entry */ +	mutex_lock(&priv->dynamic_config_lock);  	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,  			      ops->packed_size); +	if (rc < 0) { +		mutex_unlock(&priv->dynamic_config_lock); +		return rc; +	} + +	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops); +	mutex_unlock(&priv->dynamic_config_lock);  	if (rc < 0)  		return rc; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 924c3f129992..c343effe2e96 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -27,15 +27,29 @@  #define SJA1105_UNKNOWN_MULTICAST	0x010000000000ull -static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, -			     unsigned int startup_delay) +/* Configure the optional reset pin and bring up switch */ +static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len, +			    unsigned int startup_delay)  { +	struct gpio_desc *gpio; + +	gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); +	if (IS_ERR(gpio)) +		return PTR_ERR(gpio); + +	if (!gpio) +		return 0; +  	gpiod_set_value_cansleep(gpio, 1);  	/* Wait for minimum reset pulse length */  	msleep(pulse_len);  	gpiod_set_value_cansleep(gpio, 0);  	/* Wait until chip is ready after reset */  	msleep(startup_delay); + +	gpiod_put(gpio); + +	return 0;  }  static void @@ -1095,27 +1109,78 @@ static int sja1105_static_config_load(struct sja1105_private *priv)  	return sja1105_static_config_upload(priv);  } -static int sja1105_parse_rgmii_delays(struct sja1105_private *priv) +/* This is the "new way" for a MAC driver to configure its RGMII delay lines, + * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps" + * properties. 
It has the advantage of working with fixed links and with PHYs + * that apply RGMII delays too, and the MAC driver needs not perform any + * special checks. + * + * Previously we were acting upon the "phy-mode" property when we were + * operating in fixed-link, basically acting as a PHY, but with a reversed + * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should + * behave as if it is connected to a PHY which has applied RGMII delays in the + * TX direction. So if anything, RX delays should have been added by the MAC, + * but we were adding TX delays. + * + * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall + * back to the legacy behavior and apply delays on fixed-link ports based on + * the reverse interpretation of the phy-mode. This is a deviation from the + * expected default behavior which is to simply apply no delays. To achieve + * that behavior with the new bindings, it is mandatory to specify + * "{rx,tx}-internal-delay-ps" with a value of 0. + */ +static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port, +				      struct device_node *port_dn)  { -	struct dsa_switch *ds = priv->ds; -	int port; +	phy_interface_t phy_mode = priv->phy_mode[port]; +	struct device *dev = &priv->spidev->dev; +	int rx_delay = -1, tx_delay = -1; -	for (port = 0; port < ds->num_ports; port++) { -		if (!priv->fixed_link[port]) -			continue; +	if (!phy_interface_mode_is_rgmii(phy_mode)) +		return 0; -		if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID || -		    priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID) -			priv->rgmii_rx_delay[port] = true; +	of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); +	of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); -		if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID || -		    priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID) -			priv->rgmii_tx_delay[port] = true; +	if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) { +		dev_warn(dev, +			 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " +			 "please update device tree to specify \"rx-internal-delay-ps\" and " +			 "\"tx-internal-delay-ps\"", +			 port); -		if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) && -		    !priv->info->setup_rgmii_delay) -			return -EINVAL; +		if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || +		    phy_mode == PHY_INTERFACE_MODE_RGMII_ID) +			rx_delay = 2000; + +		if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || +		    phy_mode == PHY_INTERFACE_MODE_RGMII_ID) +			tx_delay = 2000; +	} + +	if (rx_delay < 0) +		rx_delay = 0; +	if (tx_delay < 0) +		tx_delay = 0; + +	if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) { +		dev_err(dev, "Chip cannot apply RGMII delays\n"); +		return -EINVAL;  	} + +	if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) || +	    (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) || +	    (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) || +	    (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) { +		dev_err(dev, +			"port %d RGMII delay values out of range, must be between %d and %d ps\n", +			port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS); +		return -ERANGE; +	} + +	priv->rgmii_rx_delay_ps[port] = rx_delay; +	priv->rgmii_tx_delay_ps[port] = tx_delay; +  	return 0;  } @@ -1166,6 +1231,12 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,  		}  		priv->phy_mode[index] = phy_mode; + +		err = sja1105_parse_rgmii_delays(priv, index, child); +		if (err) { +			of_node_put(child); 
+			return err; +		}  	}  	return 0; @@ -1360,7 +1431,7 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,  	 */  	if (state->interface != PHY_INTERFACE_MODE_NA &&  	    sja1105_phy_mode_mismatch(priv, port, state->interface)) { -		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +		linkmode_zero(supported);  		return;  	} @@ -1380,9 +1451,8 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,  		phylink_set(mask, 2500baseX_Full);  	} -	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask);  }  static int @@ -1766,6 +1836,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,  static int sja1105_fdb_dump(struct dsa_switch *ds, int port,  			    dsa_fdb_dump_cb_t *cb, void *data)  { +	struct dsa_port *dp = dsa_to_port(ds, port);  	struct sja1105_private *priv = ds->priv;  	struct device *dev = ds->dev;  	int i; @@ -1802,7 +1873,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,  		u64_to_ether_addr(l2_lookup.macaddr, macaddr);  		/* We need to hide the dsa_8021q VLANs from the user. */ -		if (!priv->vlan_aware) +		if (!dsa_port_is_vlan_filtering(dp))  			l2_lookup.vlanid = 0;  		rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);  		if (rc) @@ -2295,11 +2366,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,  		tpid2 = ETH_P_SJA1105;  	} -	if (priv->vlan_aware == enabled) -		return 0; - -	priv->vlan_aware = enabled; -  	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];  	general_params = table->entries;  	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */ @@ -2332,7 +2398,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,  	 */  	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];  	l2_lookup_params = table->entries; -	l2_lookup_params->shared_learn = !priv->vlan_aware; +	l2_lookup_params->shared_learn = !enabled;  	for (port = 0; port < ds->num_ports; port++) {  		if (dsa_is_unused_port(ds, port)) @@ -2965,7 +3031,6 @@ static int sja1105_setup_ports(struct sja1105_private *priv)  			continue;  		dp->priv = sp; -		sp->dp = dp;  		sp->data = tagger_data;  		slave = dp->slave;  		kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); @@ -3229,17 +3294,14 @@ static int sja1105_probe(struct spi_device *spi)  		return -EINVAL;  	} +	rc = sja1105_hw_reset(dev, 1, 1); +	if (rc) +		return rc; +  	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);  	if (!priv)  		return -ENOMEM; -	/* Configure the optional reset pin and bring up switch */ -	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); -	if (IS_ERR(priv->reset_gpio)) -		dev_dbg(dev, "reset-gpios not defined, ignoring\n"); -	else -		sja1105_hw_reset(priv->reset_gpio, 1, 1); -  	/* Populate our driver private structure (priv) based on  	 * the device tree node that was probed (spi)  	 */ @@ -3303,6 +3365,7 @@ static int sja1105_probe(struct spi_device *spi)  	priv->ds = ds;  	mutex_init(&priv->ptp_data.lock); +	mutex_init(&priv->dynamic_config_lock);  	mutex_init(&priv->mgmt_lock);  	rc = sja1105_parse_dt(priv); @@ -3311,15 +3374,6 @@ static int sja1105_probe(struct spi_device *spi)  		return rc;  	} -	/* Error out early if internal delays are required through DT -	 * and we can't apply them. 
-	 */ -	rc = sja1105_parse_rgmii_delays(priv); -	if (rc < 0) { -		dev_err(ds->dev, "RGMII delay not supported\n"); -		return rc; -	} -  	if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {  		priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,  					 sizeof(struct sja1105_cbs_entry), diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index 6802f4057cc0..f5dca6a9b0f9 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -394,7 +394,8 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,  				vl_lookup[k].vlanid = rule->key.vl.vid;  				vl_lookup[k].vlanprior = rule->key.vl.pcp;  			} else { -				u16 vid = dsa_8021q_rx_vid(priv->ds, port); +				struct dsa_port *dp = dsa_to_port(priv->ds, port); +				u16 vid = dsa_tag_8021q_rx_vid(dp);  				vl_lookup[k].vlanid = vid;  				vl_lookup[k].vlanprior = 0; @@ -494,13 +495,15 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,  			bool append)  {  	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); +	struct dsa_port *dp = dsa_to_port(priv->ds, port); +	bool vlan_aware = dsa_port_is_vlan_filtering(dp);  	int rc; -	if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { +	if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {  		NL_SET_ERR_MSG_MOD(extack,  				   "Can only redirect based on DMAC");  		return -EOPNOTSUPP; -	} else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { +	} else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {  		NL_SET_ERR_MSG_MOD(extack,  				   "Can only redirect based on {DMAC, VID, PCP}");  		return -EOPNOTSUPP; @@ -568,6 +571,8 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,  		    u32 num_entries, struct action_gate_entry *entries)  {  	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); +	struct dsa_port *dp = dsa_to_port(priv->ds, port); +	bool vlan_aware = dsa_port_is_vlan_filtering(dp);  	int ipv = -1;  	int i, rc;  	s32 rem; @@ -592,11 +597,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,  		return -ERANGE;  	} -	if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { +	if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {  		NL_SET_ERR_MSG_MOD(extack,  				   "Can only gate based on DMAC");  		return -EOPNOTSUPP; -	} else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { +	} else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {  		NL_SET_ERR_MSG_MOD(extack,  				   "Can only gate based on {DMAC, VID, PCP}");  		return -EOPNOTSUPP; diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index 469420941054..910fcb3b252b 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -456,7 +456,7 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,  		phylink_set(mask, 1000baseT_Full);  		break;  	default: -		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); +		linkmode_zero(supported);  		dev_err(ds->dev, "Unsupported port: %i\n", port);  		return;  	} @@ -467,10 +467,8 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,  	phylink_set(mask, 10baseT_Full);  	phylink_set(mask, 100baseT_Full); -	bitmap_and(supported, supported, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); -	bitmap_and(state->advertising, state->advertising, mask, -		   __ETHTOOL_LINK_MODE_MASK_NBITS); +	linkmode_and(supported, supported, mask); +	linkmode_and(state->advertising, state->advertising, mask); 
 }  static void xrs700x_mac_link_up(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c index d01cf1073d49..127a677d1f39 100644 --- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c +++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c @@ -31,7 +31,7 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg,  	uval = (u16)FIELD_GET(GENMASK(31, 16), reg); -	ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval); +	ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);  	if (ret < 0) {  		dev_err(dev, "xrs mdiobus_write returned %d\n", ret);  		return ret; @@ -39,13 +39,13 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg,  	uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ); -	ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval); +	ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);  	if (ret < 0) {  		dev_err(dev, "xrs mdiobus_write returned %d\n", ret);  		return ret;  	} -	ret = mdiobus_read(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD); +	ret = mdiodev_read(mdiodev, XRS_MDIO_IBD);  	if (ret < 0) {  		dev_err(dev, "xrs mdiobus_read returned %d\n", ret);  		return ret; @@ -64,7 +64,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,  	u16 uval;  	int ret; -	ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD, (u16)val); +	ret = mdiodev_write(mdiodev, XRS_MDIO_IBD, (u16)val);  	if (ret < 0) {  		dev_err(dev, "xrs mdiobus_write returned %d\n", ret);  		return ret; @@ -72,7 +72,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,  	uval = (u16)FIELD_GET(GENMASK(31, 16), reg); -	ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval); +	ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);  	if (ret < 0) {  		dev_err(dev, "xrs mdiobus_write returned %d\n", ret);  		return ret; @@ -80,7 +80,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,  	uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE); -	ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval); +	ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);  	if (ret < 0) {  		dev_err(dev, "xrs mdiobus_write returned %d\n", ret);  		return ret; | 
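The RGMII delay handling added to sja1105.h above replaces the old hard-coded 90-degree setting with conversions between picoseconds, phase (kept in tenths of a degree) and the hardware delay-tune code. Below is a minimal standalone sketch of that arithmetic, assuming the 8 ns (1 Gbps) clock period stated in the comment; the helper names and the main() harness are local to the example and are not part of the driver.

/* Mirrors SJA1105_RGMII_DELAY_PS_TO_PHASE, _PHASE_TO_PS and
 * _PHASE_TO_HW from the diff above. 800 is 8000 ps / 10, so the
 * phase result is in tenths of a degree, avoiding floating point.
 */
#include <stdio.h>

static int ps_to_phase(int ps)    { return (ps * 360) / 800; }
static int phase_to_ps(int phase) { return (800 * phase) / 360; }
/* UM11040: delay in degrees is 73.8 + delay_tune * 0.9 */
static int phase_to_hw(int phase) { return (phase - 738) / 9; }

int main(void)
{
	int ps = 2000;	/* the legacy fallback delay used for fixed links */
	int phase = ps_to_phase(ps);

	printf("%d ps -> %d.%d degrees -> hw code %d\n",
	       ps, phase / 10, phase % 10, phase_to_hw(phase));
	printf("valid range: %d..%d ps\n",
	       phase_to_ps(738), phase_to_ps(1017));
	return 0;
}

With these formulas the 2000 ps fallback maps to 90.0 degrees and hardware code 18, the same value the removed sja1105_rgmii_delay(90) helper produced, while the 73.8..101.7 degree window works out to roughly 1640..2260 ps.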

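The rtl8366rb spanning-tree support above packs a 2-bit state per port into each of eight per-FID registers starting at 0x0050, and writes the same state to every FID via regmap_update_bits(). Below is a minimal standalone sketch of that field packing, mirroring the RTL8366RB_STP_* macros from the diff; the local macro names, the example port number and the printf() harness are illustrative only.

/* Shows the mask/value pair that rtl8366rb_port_stp_state_set()
 * computes for one port, repeated for each of the eight FID registers.
 */
#include <stdio.h>

#define STP_STATE_BASE		0x0050	/* 0x0050..0x0057, one register per FID */
#define STP_STATE_FORWARDING	0x3
#define STP_MASK		0x3	/* GENMASK(1, 0) */

/* Same shift as RTL8366RB_STP_STATE(port, state) */
static unsigned int stp_field(int port, unsigned int state)
{
	return state << (port * 2);
}

int main(void)
{
	int port = 2, fid;

	/* The driver applies the same state to the port in all 8 FIDs */
	for (fid = 0; fid < 8; fid++)
		printf("reg 0x%04x: update_bits(mask=0x%04x, val=0x%04x)\n",
		       STP_STATE_BASE + fid,
		       stp_field(port, STP_MASK),
		       stp_field(port, STP_STATE_FORWARDING));
	return 0;
}

For port 2 this yields mask 0x0030 and, for the forwarding state, value 0x0030, written identically to registers 0x0050 through 0x0057 so the port's STP state takes effect in every filtering database.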