author    Dave Airlie <airlied@redhat.com>  2015-04-20 11:32:26 +1000
committer Dave Airlie <airlied@redhat.com>  2015-04-20 13:05:20 +1000
commit    2c33ce009ca2389dbf0535d0672214d09738e35e (patch)
tree      6186a6458c3c160385d794a23eaf07c786a9e61b /drivers/net/ethernet/intel/i40e/i40e_main.c
parent    cec32a47010647e8b0603726ebb75b990a4057a4 (diff)
parent    09d51602cf84a1264946711dd4ea0dddbac599a1 (diff)
Merge Linus master into drm-next

The merge is clean, but the arm build fails afterwards due to API changes in the regulator tree. I've included the patch in the merge to fix the build.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_main.c')
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 935
1 file changed, 706 insertions(+), 229 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index dadda3c5d658..24481cd7e59a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 6
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -75,6 +75,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
/* required last entry */
{0, }
};
@@ -249,6 +250,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
}
/**
+ * i40e_find_vsi_from_id - searches for the vsi with the given id
+ * @pf - the pf structure to search for the vsi
+ * @id - id of the vsi it is searching for
+ **/
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->id == id))
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
* i40e_service_event_schedule - Schedule the service task to wake up
* @pf: board private structure
*
@@ -450,7 +467,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_pf_reset_stats - Reset all of the stats for the given pf
+ * i40e_pf_reset_stats - Reset all of the stats for the given PF
* @pf: the PF to be reset
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
@@ -896,7 +913,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_update_pf_stats - Update the pf statistics counters.
+ * i40e_update_pf_stats - Update the PF statistics counters.
* @pf: the PF to be updated
**/
static void i40e_update_pf_stats(struct i40e_pf *pf)
@@ -919,11 +936,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->eth.rx_discards,
&nsd->eth.rx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_discards,
- &nsd->eth.tx_discards);
-
i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
I40E_GLPRT_UPRCL(hw->port),
pf->stat_offsets_loaded,
@@ -1133,7 +1145,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL
@@ -1161,7 +1173,7 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
* i40e_find_mac - Find a mac addr in the macvlan filters list
* @vsi: the VSI to be searched
* @macaddr: the MAC address we are searching for
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns the first filter with the provided MAC address or NULL if
@@ -1209,7 +1221,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
* i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
- * @is_vf: true if it is a vf
+ * @is_vf: true if it is a VF
* @is_netdev: true if it is a netdev
*
* Goes through all the macvlan filters and adds a
@@ -1270,7 +1282,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
@@ -1330,7 +1342,7 @@ add_filter_out:
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
**/
void i40e_del_filter(struct i40e_vsi *vsi,
@@ -1357,7 +1369,7 @@ void i40e_del_filter(struct i40e_vsi *vsi,
f->counter--;
}
} else {
- /* make sure we don't remove a filter in use by vf or netdev */
+ /* make sure we don't remove a filter in use by VF or netdev */
int min_f = 0;
min_f += (f->is_vf ? 1 : 0);
min_f += (f->is_netdev ? 1 : 0);
@@ -1546,7 +1558,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.tc_info[i].qoffset = offset;
vsi->tc_config.tc_info[i].qcount = qcount;
- /* find the power-of-2 of the number of queue pairs */
+ /* find the next higher power-of-2 of num queue pairs */
num_qps = qcount;
pow = 0;
while (num_qps && ((1 << pow) < qcount)) {
@@ -1576,6 +1588,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
/* Set actual Tx/Rx queue pairs */
vsi->num_queue_pairs = offset;
+ if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
+ if (vsi->req_queue_pairs > 0)
+ vsi->num_queue_pairs = vsi->req_queue_pairs;
+ else
+ vsi->num_queue_pairs = pf->num_lan_msix;
+ }
/* Scheduler section valid can only be set for ADD VSI */
if (is_add) {
@@ -1967,7 +1985,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
@@ -1996,7 +2014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
@@ -2280,7 +2298,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
@@ -2398,20 +2416,20 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
struct i40e_vsi *vsi = ring->vsi;
cpumask_var_t mask;
- if (ring->q_vector && ring->netdev) {
- /* Single TC mode enable XPS */
- if (vsi->tc_config.numtc <= 1 &&
- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+ if (!ring->q_vector || !ring->netdev)
+ return;
+
+ /* Single TC mode enable XPS */
+ if (vsi->tc_config.numtc <= 1) {
+ if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
ring->queue_index);
- } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
- /* Disable XPS to allow selection based on TC */
- bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
- netif_set_xps_queue(ring->netdev, mask,
- ring->queue_index);
- free_cpumask_var(mask);
- }
+ } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ /* Disable XPS to allow selection based on TC */
+ bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
+ netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
+ free_cpumask_var(mask);
}
}
@@ -2596,7 +2614,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ if (ring_is_ps_enabled(ring)) {
+ i40e_alloc_rx_headers(ring);
+ i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
+ } else {
+ i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
+ }
return 0;
}
@@ -3183,13 +3206,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->globr_count++;
} else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
- set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
}
}
if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
+ rd32(hw, I40E_PFHMC_ERRORINFO),
+ rd32(hw, I40E_PFHMC_ERRORDATA));
}
if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
@@ -3825,6 +3851,8 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
pci_disable_msix(pf->pdev);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
+ kfree(pf->irq_pile);
+ pf->irq_pile = NULL;
} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
pci_disable_msi(pf->pdev);
}
@@ -4021,7 +4049,7 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
#endif
/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
- * @pf: pointer to pf
+ * @pf: pointer to PF
*
* Get TC map for ISCSI PF type that will include iSCSI TC
* and LAN TC.
@@ -4119,7 +4147,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
enabled_tc = i40e_get_iscsi_tc_map(pf);
else
- enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ return 1; /* Only TC0 */
/* At least have TC0 */
enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -4169,11 +4197,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
- /* MPF enabled and iSCSI PF type */
+ /* MFP enabled and iSCSI PF type */
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return pf->hw.func_caps.enabled_tcmap;
+ return i40e_pf_get_default_tc(pf);
}
/**
@@ -4196,7 +4224,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi bw config, err %d, aq_err %d\n",
+ "couldn't get PF vsi bw config, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4206,7 +4234,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
+ "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4383,7 +4411,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.pf_num = vsi->back->hw.pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
/* Update the VSI after updating the VSI queue-mapping information */
@@ -4563,6 +4591,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
+ /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4))
+ goto out;
+
/* Get the initial DCB configuration */
err = i40e_init_dcb(hw);
if (!err) {
@@ -4626,6 +4659,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_40GB:
strlcpy(speed, "40 Gbps", SPEED_SIZE);
break;
+ case I40E_LINK_SPEED_20GB:
+ strncpy(speed, "20 Gbps", SPEED_SIZE);
+ break;
case I40E_LINK_SPEED_10GB:
strlcpy(speed, "10 Gbps", SPEED_SIZE);
break;
@@ -4853,11 +4889,7 @@ exit:
*
* Returns 0 on success, negative value on failure
**/
-#ifdef I40E_FCOE
int i40e_open(struct net_device *netdev)
-#else
-static int i40e_open(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -4967,7 +4999,7 @@ err_setup_tx:
/**
* i40e_fdir_filter_exit - Cleans up the Flow Director accounting
- * @pf: Pointer to pf
+ * @pf: Pointer to PF
*
* This function destroys the hlist where all the Flow Director
* filters were saved.
@@ -5055,24 +5087,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
- } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
-
- /* Request a Firmware Reset
- *
- * Same as Global reset, plus restarting the
- * embedded firmware engine.
- */
- /* enable EMP Reset */
- val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
- val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
- wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
-
- /* force the reset */
- val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
- val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
- wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
- i40e_flush(&pf->hw);
-
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -5195,7 +5209,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
struct i40e_aqc_lldp_get_mib *mib =
(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
struct i40e_hw *hw = &pf->hw;
- struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
struct i40e_dcbx_config tmp_dcbx_cfg;
bool need_reconfig = false;
int ret = 0;
@@ -5226,10 +5239,11 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
goto exit;
}
- memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
/* Store the old configuration */
- tmp_dcbx_cfg = *dcbx_cfg;
+ tmp_dcbx_cfg = hw->local_dcbx_config;
+ /* Reset the old DCBx configuration data */
+ memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
@@ -5238,20 +5252,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
}
/* No change detected in DCBX configs */
- if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
+ sizeof(tmp_dcbx_cfg))) {
dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
goto exit;
}
- need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
+ need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
+ &hw->local_dcbx_config);
- i40e_dcbnl_flush_apps(pf, dcbx_cfg);
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
if (!need_reconfig)
goto exit;
/* Enable DCB tagging only when more than one TC */
- if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
@@ -5351,9 +5367,9 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
* i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
* @pf: board private structure
**/
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
- int val, fcnt_prog;
+ u32 val, fcnt_prog;
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
@@ -5361,12 +5377,13 @@ int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
}
/**
- * i40e_get_current_fd_count - Get the count of total FD filters programmed
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
-int i40e_get_current_fd_count(struct i40e_pf *pf)
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
- int val, fcnt_prog;
+ u32 val, fcnt_prog;
+
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
@@ -5375,6 +5392,21 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
}
/**
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
+ * @pf: board private structure
+ **/
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+{
+ u32 val, fcnt_prog;
+
+ val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+ fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+ ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+ I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+ return fcnt_prog;
+}
+
+/**
* i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
* @pf: board private structure
**/
@@ -5388,7 +5420,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
/* Check if, FD SB or ATR was auto disabled and if there is enough room
* to re-enable
*/
- fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+ fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) ||
@@ -5410,13 +5442,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
}
#define I40E_MIN_FD_FLUSH_INTERVAL 10
+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
* i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
* @pf: board private structure
**/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
+ unsigned long min_flush_time;
int flush_wait_retry = 50;
+ bool disable_atr = false;
+ int fd_room;
int reg;
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
@@ -5424,9 +5460,20 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
- set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ /* If the flush is happening too quick and we have mostly
+ * SB rules we should not re-enable ATR for some time.
+ */
+ min_flush_time = pf->fd_flush_timestamp
+ + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+ fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+
+ if (!(time_after(jiffies, min_flush_time)) &&
+ (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ disable_atr = true;
+ }
+
pf->fd_flush_timestamp = jiffies;
- pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
@@ -5446,10 +5493,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ if (!disable_atr)
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
@@ -5460,7 +5505,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
* i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
* @pf: board private structure
**/
-int i40e_get_current_atr_cnt(struct i40e_pf *pf)
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
@@ -5486,9 +5531,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
return;
- if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
- (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
- (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
i40e_fdir_check_and_reenable(pf);
@@ -5757,11 +5800,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
struct i40e_hw *hw = &pf->hw;
struct i40e_aqc_get_link_status *status =
(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
- struct i40e_link_status *hw_link_info = &hw->phy.link_info;
/* save off old link status information */
- memcpy(&pf->hw.phy.link_info_old, hw_link_info,
- sizeof(pf->hw.phy.link_info_old));
+ hw->phy.link_info_old = hw->phy.link_info;
/* Do a new status request to re-enable LSE reporting
* and load new status information into the hw struct
@@ -5875,6 +5916,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
case i40e_aqc_opc_send_msg_to_peer:
dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
break;
+ case i40e_aqc_opc_nvm_erase:
+ case i40e_aqc_opc_nvm_update:
+ i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
+ break;
default:
dev_info(&pf->pdev->dev,
"ARQ Error: Unknown event 0x%04x received\n",
@@ -5919,6 +5964,94 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
}
/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get PF vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get PF vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_config_bridge_mode - Configure the HW bridge mode
+ * @veb: pointer to the bridge instance
+ *
+ * Configure the loop back mode for the LAN VSI that is downlink to the
+ * specified HW bridge instance. It is expected this function is called
+ * when a new HW bridge is instantiated.
+ **/
+static void i40e_config_bridge_mode(struct i40e_veb *veb)
+{
+ struct i40e_pf *pf = veb->pf;
+
+ dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+ i40e_disable_pf_switch_lb(pf);
+ else
+ i40e_enable_pf_switch_lb(pf);
+}
+
+/**
* i40e_reconstitute_veb - rebuild the VEB and anything connected to it
* @veb: pointer to the VEB instance
*
@@ -5964,8 +6097,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
- /* Enable LB mode for the main VSI now that it is on a VEB */
- i40e_enable_pf_switch_lb(pf);
+ i40e_config_bridge_mode(veb);
/* create the remaining VSIs attached to this VEB */
for (v = 0; v < pf->num_alloc_vsi; v++) {
@@ -6137,7 +6269,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for pf Reset.
+ * Close up the VFs and other things in prep for PF Reset.
**/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
@@ -6222,10 +6354,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
/* re-verify the eeprom if we just had an EMP reset */
- if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
- clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
i40e_verify_eeprom(pf);
- }
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
@@ -6335,13 +6465,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}
- msleep(75);
- ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
- if (ret) {
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ msleep(75);
+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
}
-
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
@@ -6364,7 +6495,7 @@ clear_recovery:
}
/**
- * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
* @pf: board private structure
*
* Close up the VFs and other things in prep for a Core Reset,
@@ -6378,7 +6509,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
/**
* i40e_handle_mdd_event
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
*
* Called from the MDD irq handler to identify possibly malicious vfs
**/
@@ -6407,7 +6538,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_TX_QUEUE_SHIFT) -
pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
event, queue, pf_num, vf_num);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
@@ -6493,7 +6624,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- u8 filter_index;
__be16 port;
int i;
@@ -6506,22 +6636,20 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
if (pf->pending_vxlan_bitmap & (1 << i)) {
pf->pending_vxlan_bitmap &= ~(1 << i);
port = pf->vxlan_ports[i];
- ret = port ?
- i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ if (port)
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
I40E_AQC_TUNNEL_TYPE_VXLAN,
- &filter_index, NULL)
- : i40e_aq_del_udp_tunnel(hw, i, NULL);
+ NULL, NULL);
+ else
+ ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
- port ? "adding" : "deleting",
- ntohs(port), port ? i : i);
-
+ dev_info(&pf->pdev->dev,
+ "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+ port ? "add" : "delete",
+ ntohs(port), i, ret,
+ pf->hw.aq.asq_last_status);
pf->vxlan_ports[i] = 0;
- } else {
- dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
- port ? "Added" : "Deleted",
- ntohs(port), port ? i : filter_index);
}
}
}
@@ -6728,6 +6856,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->idx = vsi_idx;
vsi->rx_itr_setting = pf->rx_itr_default;
vsi->tx_itr_setting = pf->tx_itr_default;
+ vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
+ pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
@@ -6808,7 +6938,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
goto unlock_vsi;
}
- /* updates the pf for this cleared vsi */
+ /* updates the PF for this cleared vsi */
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
@@ -6921,15 +7051,14 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
*
* Work with the OS to set up the MSIX vectors needed.
*
- * Returns 0 on success, negative on failure
+ * Returns the number of vectors reserved or negative on failure
**/
static int i40e_init_msix(struct i40e_pf *pf)
{
- i40e_status err = 0;
struct i40e_hw *hw = &pf->hw;
- int other_vecs = 0;
+ int vectors_left;
int v_budget, i;
- int vec;
+ int v_actual;
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
return -ENODEV;
@@ -6951,24 +7080,62 @@ static int i40e_init_msix(struct i40e_pf *pf)
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
- pf->num_vmdq_msix = pf->num_vmdq_qps;
- other_vecs = 1;
- other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
- other_vecs++;
-
- /* Scale down if necessary, and the rings will share vectors */
- pf->num_lan_msix = min_t(int, pf->num_lan_msix,
- (hw->func_caps.num_msix_vectors - other_vecs));
- v_budget = pf->num_lan_msix + other_vecs;
+ vectors_left = hw->func_caps.num_msix_vectors;
+ v_budget = 0;
+
+ /* reserve one vector for miscellaneous handler */
+ if (vectors_left) {
+ v_budget++;
+ vectors_left--;
+ }
+
+ /* reserve vectors for the main PF traffic queues */
+ pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+ vectors_left -= pf->num_lan_msix;
+ v_budget += pf->num_lan_msix;
+
+ /* reserve one vector for sideband flow director */
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (vectors_left) {
+ v_budget++;
+ vectors_left--;
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ }
+ }
#ifdef I40E_FCOE
+ /* can we reserve enough for FCoE? */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_fcoe_msix = pf->num_fcoe_qps;
+ if (!vectors_left)
+ pf->num_fcoe_msix = 0;
+ else if (vectors_left >= pf->num_fcoe_qps)
+ pf->num_fcoe_msix = pf->num_fcoe_qps;
+ else
+ pf->num_fcoe_msix = 1;
v_budget += pf->num_fcoe_msix;
+ vectors_left -= pf->num_fcoe_msix;
}
+
#endif
+ /* any vectors left over go for VMDq support */
+ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+ int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
+ int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
+
+ /* if we're short on vectors for what's desired, we limit
+ * the queues per vmdq. If this is still more than are
+ * available, the user will need to change the number of
+ * queues/vectors used by the PF later with the ethtool
+ * channels command
+ */
+ if (vmdq_vecs < vmdq_vecs_wanted)
+ pf->num_vmdq_qps = 1;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
+
+ v_budget += vmdq_vecs;
+ vectors_left -= vmdq_vecs;
+ }
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
@@ -6977,9 +7144,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
- vec = i40e_reserve_msix_vectors(pf, v_budget);
+ v_actual = i40e_reserve_msix_vectors(pf, v_budget);
- if (vec != v_budget) {
+ if (v_actual != v_budget) {
/* If we have limited resources, we will start with no vectors
* for the special features and then allocate vectors to some
* of these features based on the policy and at the end disable
@@ -6992,26 +7159,30 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 0;
}
- if (vec < I40E_MIN_MSIX) {
+ if (v_actual < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
pf->msix_entries = NULL;
return -ENODEV;
- } else if (vec == I40E_MIN_MSIX) {
+ } else if (v_actual == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
- } else if (vec != v_budget) {
+ } else if (v_actual != v_budget) {
+ int vec;
+
/* reserve the misc vector */
- vec--;
+ vec = v_actual - 1;
/* Scale vector usage down */
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
+ pf->num_vmdq_qps = 1;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
/* partition out the remaining vectors */
switch (vec) {
@@ -7037,10 +7208,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
vec--;
}
#endif
- pf->num_lan_msix = min_t(int, (vec / 2),
- pf->num_lan_qps);
- pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
- I40E_DEFAULT_NUM_VMDQ_VSI);
+ /* give the rest to the PF */
+ pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
break;
}
}
@@ -7057,7 +7226,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
}
#endif
- return err;
+ return v_actual;
}
/**
@@ -7132,13 +7301,14 @@ err_out:
* i40e_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
**/
-static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
- int err = 0;
+ int vectors = 0;
+ ssize_t size;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- err = i40e_init_msix(pf);
- if (err) {
+ vectors = i40e_init_msix(pf);
+ if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
@@ -7158,18 +7328,32 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
- err = pci_enable_msi(pf->pdev);
- if (err) {
- dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
+ vectors = pci_enable_msi(pf->pdev);
+ if (vectors < 0) {
+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
+ vectors);
pf->flags &= ~I40E_FLAG_MSI_ENABLED;
}
+ vectors = 1; /* one MSI or Legacy vector */
}
if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
- /* track first vector for misc interrupts */
- err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+ /* set up vector assignment tracking */
+ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
+ pf->irq_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->irq_pile) {
+ dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+ return -ENOMEM;
+ }
+ pf->irq_pile->num_entries = vectors;
+ pf->irq_pile->search_hint = 0;
+
+ /* track first vector for misc interrupts, ignore return */
+ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+
+ return 0;
}
/**
@@ -7219,6 +7403,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
static int i40e_config_rss(struct i40e_pf *pf)
{
u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
u32 lut = 0;
int i, j;
@@ -7236,15 +7421,14 @@ static int i40e_config_rss(struct i40e_pf *pf)
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
/* Check capability and Set table size and register per hw expectation*/
reg_val = rd32(hw, I40E_PFQF_CTL_0);
- if (hw->func_caps.rss_table_size == 512) {
+ if (pf->rss_table_size == 512)
reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- pf->rss_table_size = 512;
- } else {
- pf->rss_table_size = 128;
+ else
reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- }
wr32(hw, I40E_PFQF_CTL_0, reg_val);
/* Populate the LUT with max no. of queues in round robin fashion */
@@ -7257,7 +7441,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
* If LAN VSI is the only consumer for RSS then this requirement
* is not necessary.
*/
- if (j == pf->rss_size)
+ if (j == vsi->rss_size)
j = 0;
/* lut = 4-byte sliding window of 4 lut entries */
lut = (lut << 8) | (j &
@@ -7281,15 +7465,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ int new_rss_size;
+
if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
return 0;
- queue_count = min_t(int, queue_count, pf->rss_size_max);
+ new_rss_size = min_t(int, queue_count, pf->rss_size_max);
- if (queue_count != pf->rss_size) {
+ if (queue_count != vsi->num_queue_pairs) {
+ vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
- pf->rss_size = queue_count;
+ pf->rss_size = new_rss_size;
i40e_reset_and_rebuild(pf, true);
i40e_config_rss(pf);
@@ -7299,6 +7487,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/**
+ * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+{
+ i40e_status status;
+ bool min_valid, max_valid;
+ u32 max_bw, min_bw;
+
+ status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+ &min_valid, &max_valid);
+
+ if (!status) {
+ if (min_valid)
+ pf->npar_min_bw = min_bw;
+ if (max_valid)
+ pf->npar_max_bw = max_bw;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+{
+ struct i40e_aqc_configure_partition_bw_data bw_data;
+ i40e_status status;
+
+ /* Set the valid bit for this PF */
+ bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+ bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+
+ /* Set the new bandwidths */
+ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+{
+ /* Commit temporary BW setting to permanent NVM image */
+ enum i40e_admin_queue_err last_aq_status;
+ i40e_status ret;
+ u16 nvm_word;
+
+ if (pf->hw.partition_id != 1) {
+ dev_info(&pf->pdev->dev,
+ "Commit BW only works on partition 1! This is partition %d",
+ pf->hw.partition_id);
+ ret = I40E_NOT_SUPPORTED;
+ goto bw_commit_out;
+ }
+
+ /* Acquire NVM for read access */
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot acquire NVM for read access, err %d: aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+
+ /* Read word 0x10 of NVM - SW compatibility word 1 */
+ ret = i40e_aq_read_nvm(&pf->hw,
+ I40E_SR_NVM_CONTROL_WORD,
+ 0x10, sizeof(nvm_word), &nvm_word,
+ false, NULL);
+ /* Save off last admin queue command status before releasing
+ * the NVM
+ */
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+
+ /* Wait a bit for NVM release to complete */
+ msleep(50);
+
+ /* Acquire NVM for write access */
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot acquire NVM for write access, err %d: aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+ /* Write it back out unchanged to initiate update NVM,
+ * which will force a write of the shadow (alt) RAM to
+ * the NVM - thus storing the bandwidth values permanently.
+ */
+ ret = i40e_aq_update_nvm(&pf->hw,
+ I40E_SR_NVM_CONTROL_WORD,
+ 0x10, sizeof(nvm_word),
+ &nvm_word, true, NULL);
+ /* Save off last admin queue command status before releasing
+ * the NVM
+ */
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "BW settings NOT SAVED, err %d aq_err %d\n",
+ ret, last_aq_status);
+bw_commit_out:
+
+ return ret;
+}
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -7324,8 +7634,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_1BUF_ENABLED;
+ I40E_FLAG_MSIX_ENABLED;
+
+ if (iommu_present(&pci_bus_type))
+ pf->flags |= I40E_FLAG_RX_PS_ENABLED;
+ else
+ pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
@@ -7336,6 +7650,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
*/
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
pf->rss_size = 1;
+ pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
@@ -7347,6 +7662,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ if (i40e_get_npar_bw_setting(pf))
+ dev_warn(&pf->pdev->dev,
+ "Could not get NPAR bw settings\n");
+ else
+ dev_info(&pf->pdev->dev,
+ "Min BW = %8.8x, Max BW = %8.8x\n",
+ pf->npar_min_bw, pf->npar_max_bw);
}
/* FW/NVM is not yet fixed in this regard */
@@ -7354,11 +7676,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- /* Setup a counter for fd_atr per pf */
+ /* Setup a counter for fd_atr per PF */
pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- /* Setup a counter for fd_sb per pf */
+ /* Setup a counter for fd_sb per PF */
pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
@@ -7406,22 +7728,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
pf->qp_pile->search_hint = 0;
- /* set up vector assignment tracking */
- size = sizeof(struct i40e_lump_tracking)
- + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
- pf->irq_pile = kzalloc(size, GFP_KERNEL);
- if (!pf->irq_pile) {
- kfree(pf->qp_pile);
- err = -ENOMEM;
- goto sw_init_done;
- }
- pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
- pf->irq_pile->search_hint = 0;
-
pf->tx_timeout_recovery_level = 1;
mutex_init(&pf->switch_mutex);
+ /* If NPAR is enabled nudge the Tx scheduler */
+ if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
+ i40e_set_npar_bw_setting(pf);
+
sw_init_done:
return err;
}
@@ -7534,7 +7848,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ netdev_info(netdev, "vxlan port %d already offloaded\n",
+ ntohs(port));
return;
}
@@ -7542,7 +7857,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
next_idx = i40e_get_vxlan_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
ntohs(port));
return;
}
@@ -7550,8 +7865,9 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->vxlan_ports[next_idx] = port;
pf->pending_vxlan_bitmap |= (1 << next_idx);
-
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
}
/**
@@ -7579,12 +7895,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
* and make it pending
*/
pf->vxlan_ports[idx] = 0;
-
pf->pending_vxlan_bitmap |= (1 << idx);
-
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
+ ntohs(port));
} else {
- netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
ntohs(port));
}
}
@@ -7653,6 +7970,118 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
+#ifdef HAVE_BRIDGE_ATTRIBS
+/**
+ * i40e_ndo_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ *
+ * Inserts a new hardware bridge if not already created and
+ * enables the bridging mode requested (VEB or VEPA). If the
+ * hardware bridge has already been inserted and the request
+ * is to change the mode then that requires a PF reset to
+ * allow rebuild of the components with required hardware
+ * bridge mode enabled.
+ **/
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ struct nlattr *attr, *br_spec;
+ int i, rem;
+
+ /* Only for PF VSI for now */
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ return -EOPNOTSUPP;
+
+ /* Find the HW bridge for PF VSI */
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if ((mode != BRIDGE_MODE_VEPA) &&
+ (mode != BRIDGE_MODE_VEB))
+ return -EINVAL;
+
+ /* Insert a new HW bridge */
+ if (!veb) {
+ veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ if (veb) {
+ veb->bridge_mode = mode;
+ i40e_config_bridge_mode(veb);
+ } else {
+ /* No Bridge HW offload available */
+ return -ENOENT;
+ }
+ break;
+ } else if (mode != veb->bridge_mode) {
+ /* Existing HW bridge but different mode needs reset */
+ veb->bridge_mode = mode;
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_ndo_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq #
+ * @dev: the netdev being configured
+ * @filter_mask: unused
+ *
+ * Return the mode in which the hardware bridge is operating in
+ * i.e VEB or VEPA.
+ **/
+#ifdef HAVE_BRIDGE_FILTER
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 __always_unused filter_mask)
+#else
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev)
+#endif /* HAVE_BRIDGE_FILTER */
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ int i;
+
+ /* Only for PF VSI for now */
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ return -EOPNOTSUPP;
+
+ /* Find the HW bridge for the PF VSI */
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+
+ if (!veb)
+ return 0;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -7687,6 +8116,10 @@ static const struct net_device_ops i40e_netdev_ops = {
#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
+#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
+ .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
+#endif /* HAVE_BRIDGE_ATTRIBS */
};
/**
@@ -7799,6 +8232,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
}
/**
+ * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
+ * @vsi: the VSI being queried
+ *
+ * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
+ **/
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
+{
+ struct i40e_veb *veb;
+ struct i40e_pf *pf = vsi->back;
+
+ /* Uplink is not a bridge so default to VEB */
+ if (vsi->veb_idx == I40E_NO_VEB)
+ return 1;
+
+ veb = pf->veb[vsi->veb_idx];
+ /* Uplink is a bridge in VEPA mode */
+ if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
+ return 0;
+
+ /* Uplink is a bridge in VEB mode */
+ return 1;
+}
+
+/**
* i40e_add_vsi - Add a VSI to the switch
* @vsi: the VSI being configured
*
@@ -7830,11 +8287,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi config, err %d, aq_err %d\n",
+ "couldn't get PF vsi config, err %d, aq_err %d\n",
ret, pf->hw.aq.asq_last_status);
return -ENOENT;
}
- memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info = ctxt.info;
vsi->info.valid_sections = 0;
vsi->seid = ctxt.seid;
@@ -7883,12 +8340,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- ctxt.info.valid_sections |=
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- ctxt.info.switch_id =
+ ctxt.info.switch_id =
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -7896,16 +8355,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
- ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-
/* This VSI is connected to VEB so the switch_id
* should be set to zero by default.
*/
- ctxt.info.switch_id = 0;
- ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
/* Setup the VSI tx/rx queue map for TC0 only for now */
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
@@ -7915,15 +8376,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_VF;
- ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-
/* This VSI is connected to VEB so the switch_id
* should be set to zero by default.
*/
- ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
@@ -7961,7 +8425,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = -ENOENT;
goto err;
}
- memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info = ctxt.info;
vsi->info.valid_sections = 0;
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
@@ -8281,7 +8745,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
__func__);
return NULL;
}
- i40e_enable_pf_switch_lb(pf);
+ i40e_config_bridge_mode(veb);
}
for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
@@ -8724,7 +9188,7 @@ err_alloc:
}
/**
- * i40e_setup_pf_switch_element - set pf vars based on switch type
+ * i40e_setup_pf_switch_element - set PF vars based on switch type
* @pf: board private structure
* @ele: element we are building info from
* @num_reported: total number of elements
@@ -8930,15 +9394,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_config_rss(pf);
/* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw, true);
- i40e_link_event(pf);
-
- /* Initialize user-specific link properties */
- pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
- I40E_AQ_AN_COMPLETED) ? true : false);
-
- /* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw, true);
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
/* Initialize user-specific link properties */
@@ -9008,7 +9464,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
- pf->num_lan_qps = pf->rss_size_max;
+ pf->num_lan_qps = max_t(int, pf->rss_size_max,
+ num_online_cpus());
+ pf->num_lan_qps = min_t(int, pf->num_lan_qps,
+ pf->hw.func_caps.num_tx_qp);
+
queues_left -= pf->num_lan_qps;
}
@@ -9061,7 +9521,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
* i40e_setup_pf_filter_control - Setup PF static filter control
* @pf: PF to be setup
*
- * i40e_setup_pf_filter_control sets up a pf's initial filter control
+ * i40e_setup_pf_filter_control sets up a PF's initial filter control
* settings. If PE/FCoE are enabled then it will also set the per PF
* based filter sizes required for them. It also enables Flow director,
* ethertype and macvlan type filter settings for the pf.
@@ -9106,8 +9566,10 @@ static void i40e_print_features(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
#endif
- buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs);
+ buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
+ pf->hw.func_caps.num_vsis,
+ pf->vsi[pf->lan_vsi]->num_queue_pairs,
+ pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
if (pf->flags & I40E_FLAG_RSS_ENABLED)
buf += sprintf(buf, "RSS ");
@@ -9136,14 +9598,16 @@ static void i40e_print_features(struct i40e_pf *pf)
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
*
- * i40e_probe initializes a pf identified by a pci_dev structure.
- * The OS initialization, configuring of the pf private structure,
+ * i40e_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
* and a hardware reset occur.
*
* Returns 0 on success, negative on failure
**/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ unsigned long ioremap_len;
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
@@ -9195,8 +9659,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &pf->hw;
hw->back = pf;
- hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+
+ ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
+ I40E_MAX_CSR_SPACE);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
if (!hw->hw_addr) {
err = -EIO;
dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
@@ -9274,7 +9741,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
-
i40e_verify_eeprom(pf);
/* Rev 0 hardware was never productized */
@@ -9367,7 +9833,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
- i40e_init_interrupt_scheme(pf);
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ goto err_switch_setup;
/* The number of VSIs reported by the FW is the minimum guaranteed
* to us; HW supports far more and we share the remaining pool with
@@ -9409,13 +9877,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
- msleep(75);
- err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
- if (err) {
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ msleep(75);
+ err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
}
-
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
@@ -9499,6 +9968,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
}
+ /* get the requested speeds from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
+ err);
+ pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+
/* print a string summarizing features */
i40e_print_features(pf);
@@ -9517,7 +9993,6 @@ err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
kfree(pf->qp_pile);
- kfree(pf->irq_pile);
err_sw_init:
err_adminq_setup:
(void)i40e_shutdown_adminq(hw);
@@ -9617,7 +10092,6 @@ static void i40e_remove(struct pci_dev *pdev)
}
kfree(pf->qp_pile);
- kfree(pf->irq_pile);
kfree(pf->vsi);
iounmap(pf->hw.hw_addr);
@@ -9760,6 +10234,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
+
rtnl_lock();
i40e_prep_for_reset(pf);
rtnl_unlock();
@@ -9844,6 +10320,7 @@ static int __init i40e_init_module(void)
pr_info("%s: %s - version %s\n", i40e_driver_name,
i40e_driver_string, i40e_driver_version_str);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+
i40e_dbg_init();
return pci_register_driver(&i40e_driver);
}