author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2025-09-25 11:13:51 +0200
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2025-09-25 11:13:51 +0200
commit    56fb05093756ed55ba1cdf5d432a68004da67860 (patch)
tree      87dc333d4f606f375d6253eb5b8ef6f04674ffa6 /drivers
parent    b6a153b0829afbc63032e8271d3ca9a19e704e03 (diff)
parent    da274362a7bd9ab3a6e46d15945029145ebce672 (diff)
Merge v6.12.49 into linux-rolling-lts
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
45 files changed, 413 insertions, 254 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b585c321d345..b02ff92bae0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8462,7 +8462,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
 static void manage_dm_interrupts(struct amdgpu_device *adev,
 				 struct amdgpu_crtc *acrtc,
 				 struct dm_crtc_state *acrtc_state)
 {
+	/*
+	 * We cannot be sure that the frontend index maps to the same
+	 * backend index - some even map to more than one.
+	 * So we have to go through the CRTC to find the right IRQ.
+	 */
+	int irq_type = amdgpu_display_crtc_idx_to_irq_type(
+			adev,
+			acrtc->crtc_id);
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_vblank_crtc_config config = {0};
 	struct dc_crtc_timing *timing;
 	int offdelay;
@@ -8515,7 +8524,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
 		drm_crtc_vblank_on_config(&acrtc->base,
 					  &config);
+		/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
+		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+		case IP_VERSION(3, 0, 0):
+		case IP_VERSION(3, 0, 2):
+		case IP_VERSION(3, 0, 3):
+		case IP_VERSION(3, 2, 0):
+			if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+				drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+			if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+				drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
+#endif
+		}
+
 	} else {
+		/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
+		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+		case IP_VERSION(3, 0, 0):
+		case IP_VERSION(3, 0, 2):
+		case IP_VERSION(3, 0, 3):
+		case IP_VERSION(3, 2, 0):
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+			if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+				drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
+#endif
+			if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+				drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+		}
+
 		drm_crtc_vblank_off(&acrtc->base);
 	}
 }
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index c036bbc92ba9..7244c3abb7f9 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2683,7 +2683,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
 		ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
 						NULL, anx7625_intr_hpd_isr,
 						IRQF_TRIGGER_FALLING |
-						IRQF_ONESHOT,
+						IRQF_ONESHOT | IRQF_NO_AUTOEN,
 						"anx7625-intp", platform);
 		if (ret) {
 			DRM_DEV_ERROR(dev, "fail to request irq\n");
@@ -2753,8 +2753,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
 	}
 
 	/* Add work function */
-	if (platform->pdata.intp_irq)
+	if (platform->pdata.intp_irq) {
+		enable_irq(platform->pdata.intp_irq);
 		queue_work(platform->workqueue, &platform->work);
+	}
 
 	if (platform->pdata.audio_en)
 		anx7625_register_audio(dev, platform);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 9ba2a667a1f3..b18bdb2daddf 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2054,8 +2054,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
 	mhdp_state = to_cdns_mhdp_bridge_state(new_state);
 
 	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
-	if (!mhdp_state->current_mode)
-		return;
+	if (!mhdp_state->current_mode) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	drm_mode_set_name(mhdp_state->current_mode);
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index b804234a6551..9e1236a9ec67 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
 	kt->tile = tile;
 
 	err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
-	if (err) {
-		kobject_put(&kt->base);
-		return err;
-	}
+	if (err)
+		goto err_object;
 
 	tile->sysfs = &kt->base;
 
 	err = xe_vram_freq_sysfs_init(tile);
 	if (err)
-		return err;
+		goto err_object;
 
 	return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
+
+err_object:
+	kobject_put(&kt->base);
+	return err;
 }
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index a4845d4213b0..fc5f0e135193 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -239,8 +239,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 
 	pfence = xe_preempt_fence_create(q, q->lr.context,
 					 ++q->lr.seqno);
-	if (!pfence) {
-		err = -ENOMEM;
+	if (IS_ERR(pfence)) {
+		err = PTR_ERR(pfence);
 		goto out_fini;
 	}
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 6fb2f2919ab1..a14ee649d3da 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -545,6 +545,7 @@ struct gcr3_tbl_info {
 };
 
 struct amd_io_pgtable {
+	seqcount_t		seqcount; /* Protects root/mode update */
 	struct io_pgtable	pgtbl;
 	int			mode;
 	u64			*root;
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index f3399087859f..91cc1e0c663d 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
+#include <linux/seqlock.h>
 
 #include <asm/barrier.h>
 
@@ -144,8 +145,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
 
 	*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
 
+	write_seqcount_begin(&pgtable->seqcount);
 	pgtable->root  = pte;
 	pgtable->mode += 1;
+	write_seqcount_end(&pgtable->seqcount);
+
 	amd_iommu_update_and_flush_device_table(domain);
 
 	pte = NULL;
@@ -167,6 +171,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
 {
 	unsigned long last_addr = address + (page_size - 1);
 	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+	unsigned int seqcount;
 	int level, end_lvl;
 	u64 *pte, *page;
 
@@ -184,8 +189,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
 	}
 
-	level   = pgtable->mode - 1;
-	pte     = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+	do {
+		seqcount = read_seqcount_begin(&pgtable->seqcount);
+
+		level   = pgtable->mode - 1;
+		pte     = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+	} while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
 	address = PAGE_SIZE_ALIGN(address, page_size);
 	end_lvl = PAGE_SIZE_LEVEL(page_size);
 
@@ -262,6 +273,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
 		      unsigned long *page_size)
 {
 	int level;
+	unsigned int seqcount;
 	u64 *pte;
 
 	*page_size = 0;
 
@@ -269,8 +281,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
 	if (address > PM_LEVEL_SIZE(pgtable->mode))
 		return NULL;
 
-	level	   =  pgtable->mode - 1;
-	pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+	do {
+		seqcount = read_seqcount_begin(&pgtable->seqcount);
+		level	   =  pgtable->mode - 1;
+		pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+	} while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
 	*page_size =  PTE_LEVEL_PAGE_SIZE(level);
 
 	while (level > 0) {
@@ -552,6 +568,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	if (!pgtable->root)
 		return NULL;
 	pgtable->mode = PAGE_MODE_3_LEVEL;
+	seqcount_init(&pgtable->seqcount);
 
 	cfg->pgsize_bitmap  = amd_iommu_pgsize_bitmap;
 	cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b300f72cf01e..32d6710e264a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1768,6 +1768,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
 	unsigned long lvl_pages = lvl_to_nr_pages(level);
 	struct dma_pte *pte = NULL;
 
+	if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
+		    !IS_ALIGNED(end_pfn + 1, lvl_pages)))
+		return;
+
 	while (start_pfn <= end_pfn) {
 		if (!pte)
 			pte = pfn_to_dma_pte(domain, start_pfn, &level,
@@ -1844,7 +1848,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				unsigned long pages_to_remove;
 
 				pteval |= DMA_PTE_LARGE_PAGE;
-				pages_to_remove = min_t(unsigned long, nr_pages,
+				pages_to_remove = min_t(unsigned long,
+							round_down(nr_pages, lvl_pages),
 							nr_pte_to_next_page(pte) * lvl_pages);
 				end_pfn = iov_pfn + pages_to_remove - 1;
 				switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 163a5bbd485f..c69696d2540a 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3811,8 +3811,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	struct raid_set *rs = ti->private;
 	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
 
-	limits->io_min = chunk_size_bytes;
-	limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+	if (chunk_size_bytes) {
+		limits->io_min = chunk_size_bytes;
+		limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+	}
 }
 
 static void raid_presuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index c68dc1653cfd..cbf4c102c4d0 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -457,11 +457,15 @@ static void stripe_io_hints(struct dm_target *ti,
 			    struct queue_limits *limits)
 {
 	struct stripe_c *sc = ti->private;
-	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+	unsigned int io_min, io_opt;
 
 	limits->chunk_sectors = sc->chunk_size;
-	limits->io_min = chunk_size;
-	limits->io_opt = chunk_size * sc->stripes;
+
+	if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) &&
+	    !check_mul_overflow(io_min, sc->stripes, &io_opt)) {
+		limits->io_min = io_min;
+		limits->io_opt = io_opt;
+	}
 }
 
 static struct target_type stripe_target = {
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 12df4ff9eeee..878fda43b7f7 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
 		host->pio_ptr = NULL;
 		host->pio_size = 0;
 	} else {
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
+		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
 			     mmc_get_dma_dir(data));
 	}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 52ff0f9e04e0..00204e42de2e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2117,6 +2117,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
 	} else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
 		   BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+		   bond_has_slaves(bond) &&
 		   memcmp(slave_dev->dev_addr, bond_dev->dev_addr,
 			  bond_dev->addr_len) == 0) {
 		/* Set slave to random address to avoid duplicate mac
 		 * address in later fail over.
@@ -3337,7 +3338,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
 		/* Find out through which dev should the packet go */
 		memset(&fl6, 0, sizeof(struct flowi6));
 		fl6.daddr = targets[i];
-		fl6.flowi6_oif = bond->dev->ifindex;
 
 		dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
 		if (dst->error) {
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index a9040c42d2ff..6e97a5a7daaf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
 
 	cnic_bnx2x_delete_wait(dev, 0);
 
-	cancel_delayed_work(&cp->delete_task);
-	flush_workqueue(cnic_wq);
+	cancel_delayed_work_sync(&cp->delete_task);
 
 	if (atomic_read(&cp->iscsi_conn) != 0)
 		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index de8a6ce86ad7..12105ffb5dac 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	oct->io_qmask.iq |= BIT_ULL(iq_no);
 
 	/* Set the 32B/64B mode for each input queue */
-	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+	oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
 	iq->iqcmd_64B = (conf->instr_type == 64);
 
 	oct->fn_list.setup_iq_regs(oct, iq_no);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index cbd3859ea475..980daecab8ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -2735,7 +2735,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
 		dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
 		goto err_get_attr;
 	}
-	ethsw->bpid = dpbp_attrs.id;
+	ethsw->bpid = dpbp_attrs.bpid;
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c006f716a3bd..ca7517a68a2c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -947,9 +947,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 		if (!eop_desc)
 			break;
 
-		/* prevent any other reads prior to eop_desc */
-		smp_rmb();
-
 		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
 		/* we have caught up to head, no work left to do */
 		if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2960709f6b62..0e699a0432c5 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -372,9 +372,6 @@ struct ice_vsi {
 	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
 	atomic_t *arfs_last_fltr_id;
 
-	u16 max_frame;
-	u16 rx_buf_len;
-
 	struct ice_aqc_vsi_props info;	 /* VSI properties */
 	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 4a9a6899fc45..98c3764fed39 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -445,7 +445,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	/* Max packet size for this queue - must not be set to a larger value
 	 * than 5 x DBUF
 	 */
-	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
+	rlan_ctx.rxmax = min_t(u32, ring->max_frame,
 			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
 
 	/* Rx queue threshold in units of 64 */
@@ -541,8 +541,6 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
 	int err;
 
-	ring->rx_buf_len = ring->vsi->rx_buf_len;
-
 	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
 		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
 			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
@@ -641,21 +639,25 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
 /**
  * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
  * @vsi: VSI
+ * @ring: Rx ring to configure
+ *
+ * Determine the maximum frame size and Rx buffer length to use for a PF VSI.
+ * Set these in the associated Rx ring structure.
  */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
 {
 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
-		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
-		vsi->rx_buf_len = ICE_RXBUF_1664;
+		ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+		ring->rx_buf_len = ICE_RXBUF_1664;
 #if (PAGE_SIZE < 8192)
 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
-		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
-		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+		ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+		ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
 	} else {
-		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-		vsi->rx_buf_len = ICE_RXBUF_3072;
+		ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+		ring->rx_buf_len = ICE_RXBUF_3072;
 	}
 }
 
@@ -670,15 +672,15 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 {
 	u16 i;
 
-	if (vsi->type == ICE_VSI_VF)
-		goto setup_rings;
-
-	ice_vsi_cfg_frame_size(vsi);
-setup_rings:
 	/* set up individual rings */
 	ice_for_each_rxq(vsi, i) {
-		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+		struct ice_rx_ring *ring = vsi->rx_rings[i];
+		int err;
+
+		if (vsi->type != ICE_VSI_VF)
+			ice_vsi_cfg_frame_size(vsi, ring);
 
+		err = ice_vsi_cfg_rxq(ring);
 		if (err)
 			return err;
 	}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index cde69f568665..431a6ed498a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -865,10 +865,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
 				   rx_buf->page_offset, size);
 	sinfo->xdp_frags_size += size;
-	/* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
-	 * can pop off frags but driver has to handle it on its own
-	 */
-	rx_ring->nr_frags = sinfo->nr_frags;
 
 	if (page_is_pfmemalloc(rx_buf->page))
 		xdp_buff_set_frag_pfmemalloc(xdp);
@@ -939,20 +935,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
 /**
  * ice_get_pgcnts - grab page_count() for gathered fragments
  * @rx_ring: Rx descriptor ring to store the page counts on
+ * @ntc: the next to clean element (not included in this frame!)
  *
  * This function is intended to be called right before running XDP
  * program so that the page recycling mechanism will be able to take
  * a correct decision regarding underlying pages; this is done in such
  * way as XDP program can change the refcount of page
 */
-static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
 {
-	u32 nr_frags = rx_ring->nr_frags + 1;
 	u32 idx = rx_ring->first_desc;
 	struct ice_rx_buf *rx_buf;
 	u32 cnt = rx_ring->count;
 
-	for (int i = 0; i < nr_frags; i++) {
+	while (idx != ntc) {
 		rx_buf = &rx_ring->rx_buf[idx];
 		rx_buf->pgcnt = page_count(rx_buf->page);
 
@@ -1125,62 +1121,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
 }
 
 /**
- * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
  * @rx_ring: Rx ring with all the auxiliary data
  * @xdp: XDP buffer carrying linear + frags part
- * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
- * @ntc: a current next_to_clean value to be stored at rx_ring
+ * @ntc: the next to clean element (not included in this frame!)
  * @verdict: return code from XDP program execution
 *
- * Walk through gathered fragments and satisfy internal page
- * recycle mechanism; we take here an action related to verdict
- * returned by XDP program;
+ * Called after XDP program is completed, or on error with verdict set to
+ * ICE_XDP_CONSUMED.
+ *
+ * Walk through buffers from first_desc to the end of the frame, releasing
+ * buffers and satisfying internal page recycle mechanism. The action depends
+ * on verdict from XDP program.
 */
 static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
-			    u32 *xdp_xmit, u32 ntc, u32 verdict)
+			    u32 ntc, u32 verdict)
 {
-	u32 nr_frags = rx_ring->nr_frags + 1;
 	u32 idx = rx_ring->first_desc;
 	u32 cnt = rx_ring->count;
-	u32 post_xdp_frags = 1;
 	struct ice_rx_buf *buf;
-	int i;
+	u32 xdp_frags = 0;
+	int i = 0;
 
 	if (unlikely(xdp_buff_has_frags(xdp)))
-		post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+		xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
 
-	for (i = 0; i < post_xdp_frags; i++) {
+	while (idx != ntc) {
 		buf = &rx_ring->rx_buf[idx];
+		if (++idx == cnt)
+			idx = 0;
 
-		if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+		/* An XDP program could release fragments from the end of the
+		 * buffer. For these, we need to keep the pagecnt_bias as-is.
+		 * To do this, only adjust pagecnt_bias for fragments up to
+		 * the total remaining after the XDP program has run.
+		 */
+		if (verdict != ICE_XDP_CONSUMED)
 			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
-			*xdp_xmit |= verdict;
-		} else if (verdict & ICE_XDP_CONSUMED) {
+		else if (i++ <= xdp_frags)
 			buf->pagecnt_bias++;
-		} else if (verdict == ICE_XDP_PASS) {
-			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
-		}
 
 		ice_put_rx_buf(rx_ring, buf);
-
-		if (++idx == cnt)
-			idx = 0;
-	}
-	/* handle buffers that represented frags released by XDP prog;
-	 * for these we keep pagecnt_bias as-is; refcount from struct page
-	 * has been decremented within XDP prog and we do not have to increase
-	 * the biased refcnt
-	 */
-	for (; i < nr_frags; i++) {
-		buf = &rx_ring->rx_buf[idx];
-		ice_put_rx_buf(rx_ring, buf);
-		if (++idx == cnt)
-			idx = 0;
 	}
 
 	xdp->data = NULL;
 	rx_ring->first_desc = ntc;
-	rx_ring->nr_frags = 0;
 }
 
 /**
@@ -1260,6 +1245,10 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		/* retrieve a buffer from the ring */
 		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
 
+		/* Increment ntc before calls to ice_put_rx_mbuf() */
+		if (++ntc == cnt)
+			ntc = 0;
+
 		if (!xdp->data) {
 			void *hard_start;
 
@@ -1268,24 +1257,23 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
 			xdp_buff_clear_frags_flag(xdp);
 		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
-			ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
+			ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED);
 			break;
 		}
-		if (++ntc == cnt)
-			ntc = 0;
 
 		/* skip if it is NOP desc */
 		if (ice_is_non_eop(rx_ring, rx_desc))
 			continue;
 
-		ice_get_pgcnts(rx_ring);
+		ice_get_pgcnts(rx_ring, ntc);
 		xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
 		if (xdp_verdict == ICE_XDP_PASS)
 			goto construct_skb;
 		total_rx_bytes += xdp_get_buff_len(xdp);
 		total_rx_pkts++;
 
-		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+		ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
+		xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR);
 
 		continue;
 construct_skb:
@@ -1298,7 +1286,7 @@ construct_skb:
 			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
 			xdp_verdict = ICE_XDP_CONSUMED;
 		}
-		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+		ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
 
 		if (!skb)
 			break;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 7130992d4177..a13531c21d4a 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -357,9 +357,9 @@ struct ice_rx_ring {
 	struct ice_tx_ring *xdp_ring;
 	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
 	struct xsk_buff_pool *xsk_pool;
-	u32 nr_frags;
-	dma_addr_t dma;			/* physical address of ring */
+	u16 max_frame;
 	u16 rx_buf_len;
+	dma_addr_t dma;			/* physical address of ring */
 	u8 dcb_tc;			/* Traffic class of ring */
 	u8 ptp_rx;
 #define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 87ffd25b268a..471d64d202b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1748,19 +1748,18 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
 			     qpi->rxq.databuffer_size < 1024))
 				goto error_param;
-			vsi->rx_buf_len = qpi->rxq.databuffer_size;
-			ring->rx_buf_len = vsi->rx_buf_len;
+			ring->rx_buf_len = qpi->rxq.databuffer_size;
 			if (qpi->rxq.max_pkt_size > max_frame_size ||
 			    qpi->rxq.max_pkt_size < 64)
 				goto error_param;
 
-			vsi->max_frame = qpi->rxq.max_pkt_size;
+			ring->max_frame = qpi->rxq.max_pkt_size;
 			/* add space for the port VLAN since the VF driver is
 			 * not expected to account for it in the MTU
 			 * calculation
 			 */
 			if (ice_vf_is_port_vlan_ena(vf))
-				vsi->max_frame += VLAN_HLEN;
+				ring->max_frame += VLAN_HLEN;
 
 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 323db1e2be38..79d5fc5ac4fc 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -336,6 +336,7 @@ struct igc_adapter {
 	/* LEDs */
 	struct mutex led_mutex;
 	struct igc_led_classdev *leds;
+	bool leds_available;
 };
 
 void igc_up(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index aadc0667fa04..9ba41a427e14 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7169,8 +7169,14 @@ static int igc_probe(struct pci_dev *pdev,
 
 	if (IS_ENABLED(CONFIG_IGC_LEDS)) {
 		err = igc_led_setup(adapter);
-		if (err)
-			goto err_register;
+		if (err) {
+			netdev_warn_once(netdev,
+					 "LED init failed (%d); continuing without LED support\n",
+					 err);
+			adapter->leds_available = false;
+		} else {
+			adapter->leds_available = true;
+		}
 	}
 
 	return 0;
@@ -7226,7 +7232,7 @@ static void igc_remove(struct pci_dev *pdev)
 	cancel_work_sync(&adapter->watchdog_task);
 	hrtimer_cancel(&adapter->hrtimer);
 
-	if (IS_ENABLED(CONFIG_IGC_LEDS))
+	if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
 		igc_led_free(adapter);
 
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index e6eb98d70f3c..6334b68f28d7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -177,6 +177,7 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
 		dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
 		return;
 	}
+	ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
 	rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
 }
 
@@ -186,6 +187,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
 {
 	int err;
 
+	/* Reset VF-specific information maintained by the PF */
+	memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
 	err = octep_ctrl_net_dev_remove(oct, vf_id);
 	if (err) {
 		rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 63130ba37e9d..69b435ed8fbb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
 	if (!ptp)
 		return;
 
-	cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+	cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
 
 	ptp_clock_unregister(ptp->ptp_clock);
 	kfree(ptp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6176457b846b..de2327ffb0f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -135,8 +135,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
 	if (up) {
 		netdev_info(priv->netdev, "Link up\n");
 		netif_carrier_on(priv->netdev);
-		mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
-						NULL, NULL, NULL);
 	} else {
 		netdev_info(priv->netdev, "Link down\n");
 		netif_carrier_off(priv->netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 18ec392d1740..b561358474c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1497,12 +1497,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
 static int
 mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
 	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct net_device *netdev;
+	struct mlx5e_priv *priv;
+	int err;
+
+	netdev = mlx5_uplink_netdev_get(dev);
+	if (!netdev)
+		return 0;
 
+	priv = netdev_priv(netdev);
 	rpriv->netdev = priv->netdev;
-	return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
-					   rpriv);
+	err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+					  rpriv);
+	mlx5_uplink_netdev_put(dev, netdev);
+	return err;
 }
 
 static void
@@ -1629,8 +1638,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
 	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
 	struct net_device *netdev = rpriv->netdev;
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	void *ppriv = priv->ppriv;
+	struct mlx5e_priv *priv;
+	void *ppriv;
+
+	if (!netdev) {
+		ppriv = rpriv;
+		goto free_ppriv;
+	}
+
+	priv = netdev_priv(netdev);
+	ppriv = priv->ppriv;
 
 	if (rep->vport == MLX5_VPORT_UPLINK) {
 		mlx5e_vport_uplink_rep_unload(rpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 02a3563f51ad..d8c304427e2a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -733,6 +733,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
 	speed = lksettings.base.speed;
 
 out:
+	mlx5_uplink_netdev_put(mdev, slave);
 	return speed;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 37d5f445598c..a7486e6d0d5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -52,7 +52,20 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
 
 static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
 {
-	return mdev->mlx5e_res.uplink_netdev;
+	struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
+	struct net_device *netdev;
+
+	mutex_lock(&mlx5e_res->uplink_netdev_lock);
+	netdev = mlx5e_res->uplink_netdev;
+	netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
+	mutex_unlock(&mlx5e_res->uplink_netdev_lock);
+	return netdev;
+}
+
+static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
+					  struct net_device *netdev)
+{
+	netdev_put(netdev, &mdev->mlx5e_res.tracker);
 }
 
 struct mlx5_sd;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 998586872599..c692d2e878b2 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
 	struct ns83820 *dev = PRIV(ndev);
 	struct rx_info *info = &dev->rx_info;
 	unsigned next_rx;
-	int rx_rc, len;
+	int len;
 	u32 cmdsts;
 	__le32 *desc;
 	unsigned long flags;
@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
 		if (likely(CMDSTS_OK & cmdsts)) {
 #endif
 			skb_put(skb, len);
-			if (unlikely(!skb))
+			if (unlikely(!skb)) {
+				ndev->stats.rx_dropped++;
 				goto netdev_mangle_me_harder_failed;
+			}
 			if (cmdsts & CMDSTS_DEST_MULTI)
 				ndev->stats.multicast++;
 			ndev->stats.rx_packets++;
@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
 			}
 #endif
-			rx_rc = netif_rx(skb);
-			if (NET_RX_DROP == rx_rc) {
-netdev_mangle_me_harder_failed:
-				ndev->stats.rx_dropped++;
-			}
+			netif_rx(skb);
 		} else {
 			dev_kfree_skb_irq(skb);
 		}
 
+netdev_mangle_me_harder_failed:
 		nr++;
 		next_rx = info->next_rx;
 		desc = info->descs + (DESC_SIZE * next_rx);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index f67be4b8ad43..523cbd91baf4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -4461,10 +4461,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
 		goto out;
 	}
 
-	/* Add override window info to buffer */
+	/* Add override window info to buffer, preventing buffer overflow */
 	override_window_dwords =
-		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
-		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+		min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+		    PROTECTION_OVERRIDE_ELEMENT_DWORDS,
+		    PROTECTION_OVERRIDE_DEPTH_DWORDS);
 	if (override_window_dwords) {
 		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
 		offset += qed_grc_dump_addr_range(p_hwfn,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index c48c2de6f961..e1e472b3a301 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2051,6 +2051,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 
 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
 	rq->comp_ring.next2proc = 0;
+
+	if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+		xdp_rxq_info_unreg(&rq->xdp_rxq);
+	page_pool_destroy(rq->page_pool);
+	rq->page_pool = NULL;
 }
 
 
@@ -2091,11 +2096,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 		}
 	}
 
-	if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
-		xdp_rxq_info_unreg(&rq->xdp_rxq);
-	page_pool_destroy(rq->page_pool);
-	rq->page_pool = NULL;
-
 	if (rq->data_ring.base) {
 		dma_free_coherent(&adapter->pdev->dev,
 				  rq->rx_ring[0].size * rq->data_ring.desc_size,
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index 131388886acb..cfabd5aebb54 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = {
 };
 
 static const struct wilc_cfg_str g_cfg_str[] = {
-	{WID_FIRMWARE_VERSION, NULL},
-	{WID_MAC_ADDR, NULL},
-	{WID_ASSOC_RES_INFO, NULL},
-	{WID_NIL, NULL}
+	{WID_FIRMWARE_VERSION, 0, NULL},
+	{WID_MAC_ADDR, 0, NULL},
+	{WID_ASSOC_RES_INFO, 0, NULL},
+	{WID_NIL, 0, NULL}
 };
 
 #define WILC_RESP_MSG_TYPE_CONFIG_REPLY          'R'
@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
 
 		switch (FIELD_GET(WILC_WID_TYPE, wid)) {
 		case WID_CHAR:
+			len = 3;
+			if (len + 2 > size)
+				return;
+
 			while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
 				i++;
 
 			if (cfg->b[i].id == wid)
 				cfg->b[i].val = info[4];
 
-			len = 3;
 			break;
 
 		case WID_SHORT:
+			len = 4;
+			if (len + 2 > size)
+				return;
+
 			while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
 				i++;
 
 			if (cfg->hw[i].id == wid)
 				cfg->hw[i].val = get_unaligned_le16(&info[4]);
 
-			len = 4;
 			break;
 
 		case WID_INT:
+			len = 6;
+			if (len + 2 > size)
+				return;
+
 			while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
 				i++;
 
 			if (cfg->w[i].id == wid)
 				cfg->w[i].val = get_unaligned_le32(&info[4]);
 
-			len = 6;
 			break;
 
 		case WID_STR:
+			len = 2 + get_unaligned_le16(&info[2]);
+
 			while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
 				i++;
 
-			if (cfg->s[i].id == wid)
+			if (cfg->s[i].id == wid) {
+				if (len > cfg->s[i].len || (len + 2 > size))
+					return;
+
 				memcpy(cfg->s[i].str, &info[2],
-				       get_unaligned_le16(&info[2]) + 2);
+				       len);
+			}
 
-			len = 2 + get_unaligned_le16(&info[2]);
 			break;
 
 		default:
@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl)
 	/* store the string cfg parameters */
 	wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
 	wl->cfg.s[i].str = str_vals->firmware_version;
+	wl->cfg.s[i].len = sizeof(str_vals->firmware_version);
 	i++;
 	wl->cfg.s[i].id = WID_MAC_ADDR;
 	wl->cfg.s[i].str = str_vals->mac_address;
+	wl->cfg.s[i].len = sizeof(str_vals->mac_address);
 	i++;
 	wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
 	wl->cfg.s[i].str = str_vals->assoc_rsp;
+	wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp);
 	i++;
 	wl->cfg.s[i].id = WID_NIL;
 	wl->cfg.s[i].str = NULL;
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
index 7038b74f8e8f..5ae74bced7d7 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
@@ -24,12 +24,13 @@ struct wilc_cfg_word {
 
 struct wilc_cfg_str {
 	u16 id;
+	u16 len;
 	u8 *str;
 };
 
 struct wilc_cfg_str_vals {
-	u8 mac_address[7];
-	u8 firmware_version[129];
+	u8 mac_address[8];
+	u8 firmware_version[130];
 	u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
 };
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9e223574db7f..24d82d35041b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -890,6 +890,15 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
 	u32 upper, lower;
 	u64 ref48;
 
+	/* only type1 and type 2 PI formats have a reftag */
+	switch (ns->head->pi_type) {
+	case NVME_NS_DPS_PI_TYPE1:
+	case NVME_NS_DPS_PI_TYPE2:
+		break;
+	default:
+		return;
+	}
+
 	/* both rw and write zeroes share the same reftag format */
 	switch (ns->head->guard_type) {
 	case NVME_NVM_NS_16B_GUARD:
@@ -929,13 +938,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
 	if (nvme_ns_has_pi(ns->head)) {
 		cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
-
-		switch (ns->head->pi_type) {
-		case NVME_NS_DPS_PI_TYPE1:
-		case NVME_NS_DPS_PI_TYPE2:
-			nvme_set_ref_tag(ns, cmnd, req);
-			break;
-		}
+		nvme_set_ref_tag(ns, cmnd, req);
 	}
 
 	return BLK_STS_OK;
@@ -1014,6 +1017,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
 				return BLK_STS_NOTSUPP;
 			control |= NVME_RW_PRINFO_PRACT;
+			nvme_set_ref_tag(ns, cmnd, req);
 		}
 
 		switch (ns->head->pi_type) {
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 5b639c942f17..a2f50db2b8ba 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -304,7 +304,13 @@ static void __exit omap_cf_remove(struct platform_device *pdev)
 	kfree(cf);
 }
 
-static struct platform_driver omap_cf_driver = {
+/*
+ * omap_cf_remove() lives in .exit.text. For drivers registered via
+ * platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver omap_cf_driver __refdata = {
 	.driver = {
 		.name	= driver_name,
 	},
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 8f06adf82836..1955495ea95f 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -146,7 +146,12 @@ static struct quirk_entry quirk_asus_ignore_fan = {
 };
 
 static struct quirk_entry quirk_asus_zenbook_duo_kbd = {
-	.ignore_key_wlan = true,
+	.key_wlan_event = ASUS_WMI_KEY_IGNORE,
+};
+
+static struct quirk_entry quirk_asus_z13 = {
+	.key_wlan_event = ASUS_WMI_KEY_ARMOURY,
+	.tablet_switch_mode = asus_wmi_kbd_dock_devid,
 };
 
 static int dmi_matched(const struct dmi_system_id *dmi)
@@ -538,6 +543,15 @@ static const struct dmi_system_id asus_quirks[] = {
 		},
 		.driver_data = &quirk_asus_zenbook_duo_kbd,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "ASUS ROG Z13",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow Z13"),
+		},
+		.driver_data = &quirk_asus_z13,
+	},
 	{},
 };
 
@@ -635,6 +649,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
 	{ KE_IGNORE, 0xCF, },	/* AC mode */
 	{ KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
 	{ KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */
+	{ KE_KEY, ASUS_WMI_KEY_ARMOURY, { KEY_PROG3 } },
 	{ KE_END, 0},
 };
 
@@ -654,9 +669,11 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
 		if (atkbd_reports_vol_keys)
 			*code = ASUS_WMI_KEY_IGNORE;
 		break;
-	case 0x5F: /* Wireless console Disable */
-		if (quirks->ignore_key_wlan)
-			*code = ASUS_WMI_KEY_IGNORE;
+	case 0x5D: /* Wireless console Toggle */
+	case 0x5E: /* Wireless console Enable / Keyboard Attach, Detach */
+	case 0x5F: /* Wireless console Disable / Special Key */
+		if (quirks->key_wlan_event)
+			*code = quirks->key_wlan_event;
 		break;
 	}
 }
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index d02f15fd3482..56fe5d8a1196 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -18,6 +18,7 @@
 #include <linux/i8042.h>
 
 #define ASUS_WMI_KEY_IGNORE (-1)
+#define ASUS_WMI_KEY_ARMOURY	0xffff01
 #define ASUS_WMI_BRN_DOWN	0x2e
 #define ASUS_WMI_BRN_UP		0x2f
 
@@ -40,7 +41,7 @@ struct quirk_entry {
 	bool wmi_force_als_set;
 	bool wmi_ignore_fan;
 	bool filter_i8042_e1_extended_codes;
-	bool ignore_key_wlan;
+	int key_wlan_event;
 	enum asus_wmi_tablet_switch_mode tablet_switch_mode;
 	int wapf;
 	/*
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 871f03d160c5..14be797e89c3 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1909,8 +1909,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
 	bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
 
 	cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
-	if ((cache.flags & 0xff) == 0xff)
-		cache.flags = -1; /* read error */
+	if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
+		cache.flags = -ENODEV; /* bq27000 hdq read error */
 	if (cache.flags >= 0) {
 		cache.capacity = bq27xxx_battery_read_soc(di);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index fc079b9dcf71..502571f0c203 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -1383,11 +1383,6 @@ static int pcf2127_i2c_probe(struct i2c_client *client)
 		variant = &pcf21xx_cfg[type];
 	}
 
-	if (variant->type == PCF2131) {
-		config.read_flag_mask = 0x0;
-		config.write_flag_mask = 0x0;
-	}
-
 	config.max_register = variant->max_register,
 
 	regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap,
@@ -1461,6 +1456,11 @@ static int pcf2127_spi_probe(struct spi_device *spi)
 		variant = &pcf21xx_cfg[type];
 	}
 
+	if (variant->type == PCF2131) {
+		config.read_flag_mask = 0x0;
+		config.write_flag_mask = 0x0;
+	}
+
 	config.max_register = variant->max_register;
 
 	regmap = devm_regmap_init_spi(spi, &config);
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index bdc664ad6a93..1fcc9348dd43 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -101,13 +101,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
 	return string_length;
 }
 
+static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
+{
+	struct xhci_ep_ctx *ep_ctx;
+	unsigned int max_burst;
+	dma_addr_t deq;
+
+	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+
+	/* Populate bulk out endpoint context: */
+	ep_ctx = dbc_bulkout_ctx(dbc);
+	deq = dbc_bulkout_enq(dbc);
+	ep_ctx->ep_info = 0;
+	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
+
+	/* Populate bulk in endpoint context: */
+	ep_ctx = dbc_bulkin_ctx(dbc);
+	deq = dbc_bulkin_enq(dbc);
+	ep_ctx->ep_info = 0;
+	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+}
+
 static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
 {
 	struct dbc_info_context	*info;
-	struct xhci_ep_ctx	*ep_ctx;
 	u32			dev_info;
-	dma_addr_t		deq, dma;
-	unsigned int		max_burst;
+	dma_addr_t		dma;
 
 	if (!dbc)
 		return;
@@ -121,20 +142,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
 	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
 	info->length = cpu_to_le32(string_length);
 
-	/* Populate bulk out endpoint context: */
-	ep_ctx = dbc_bulkout_ctx(dbc);
-	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
-	deq = dbc_bulkout_enq(dbc);
-	ep_ctx->ep_info = 0;
-	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
-	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
-
-	/* Populate bulk in endpoint context: */
-	ep_ctx = dbc_bulkin_ctx(dbc);
-	deq = dbc_bulkin_enq(dbc);
-	ep_ctx->ep_info = 0;
-	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
-	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+	/* Populate bulk in and out endpoint contexts: */
+	xhci_dbc_init_ep_contexts(dbc);
 
 	/* Set DbC context and info registers: */
 	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
@@ -435,6 +444,42 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags)
 	return ctx;
 }
 
+static void xhci_dbc_ring_init(struct xhci_ring *ring)
+{
+	struct xhci_segment *seg = ring->first_seg;
+
+	/* clear all trbs on ring in case of old ring */
+	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
+
+	/* Only event ring does not use link TRB */
+	if (ring->type != TYPE_EVENT) {
+		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+		trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
+		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+	}
+	xhci_initialize_ring_info(ring);
+}
+
+static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
+{
+	struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
+	struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
+
+	if (!in_ring || !out_ring || !dbc->ctx) {
+		dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
+		return -ENODEV;
+	}
+
+	xhci_dbc_ring_init(in_ring);
+	xhci_dbc_ring_init(out_ring);
+
+	/* set ep context enqueue, dequeue, and cycle to initial values */
+	xhci_dbc_init_ep_contexts(dbc);
+
+	return 0;
+}
+
 static struct xhci_ring *
 xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
 {
@@ -463,15 +508,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
 
 	seg->dma = dma;
 
-	/* Only event ring does not use link TRB */
-	if (type != TYPE_EVENT) {
-		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
-
-		trb->link.segment_ptr = cpu_to_le64(dma);
-		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
-	}
 	INIT_LIST_HEAD(&ring->td_list);
-	xhci_initialize_ring_info(ring, 1);
+
+	xhci_dbc_ring_init(ring);
+
 	return ring;
 dma_fail:
 	kfree(seg);
@@ -863,7 +903,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
 			dev_info(dbc->dev, "DbC cable unplugged\n");
 			dbc->state = DS_ENABLED;
 			xhci_dbc_flush_requests(dbc);
-
+			xhci_dbc_reinit_ep_rings(dbc);
 			return EVT_DISC;
 		}
 
@@ -873,7 +913,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
 			writel(portsc, &dbc->regs->portsc);
 			dbc->state = DS_ENABLED;
 			xhci_dbc_flush_requests(dbc);
-
+			xhci_dbc_reinit_ep_rings(dbc);
 			return EVT_DISC;
 		}
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index f8ba15e7c225..570210e8a8e8 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -214,14 +214,11 @@ static void xhci_ring_dump_segment(struct seq_file *s,
 
 static int xhci_ring_trb_show(struct seq_file *s, void *unused)
 {
-	int			i;
 	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
 	struct xhci_segment	*seg = ring->first_seg;
 
-	for (i = 0; i < ring->num_segs; i++) {
+	xhci_for_each_ring_seg(ring->first_seg, seg)
 		xhci_ring_dump_segment(s, seg);
-		seg = seg->next;
-	}
 
 	return 0;
 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 91b47f9573cd..c9694526b157 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -27,14 +27,12 @@
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
-					       unsigned int cycle_state,
 					       unsigned int max_packet,
 					       unsigned int num,
 					       gfp_t flags)
 {
 	struct xhci_segment *seg;
 	dma_addr_t	dma;
-	int		i;
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 
 	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
@@ -56,11 +54,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
 			return NULL;
 		}
 	}
-	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
-	if (cycle_state == 0) {
-		for (i = 0; i < TRBS_PER_SEGMENT; i++)
-			seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
-	}
 	seg->num = num;
 	seg->dma = dma;
 	seg->next = NULL;
@@ -138,6 +131,14 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
 
 	chain_links = xhci_link_chain_quirk(xhci, ring->type);
 
+	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+	if (ring->cycle_state == 0) {
+		xhci_for_each_ring_seg(ring->first_seg, seg) {
+			for (int i = 0; i < TRBS_PER_SEGMENT; i++)
+				seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+		}
+	}
+
 	next = ring->enq_seg->next;
 	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
 	xhci_link_segments(last, next, ring->type, chain_links);
@@ -224,7 +225,6 @@ static int xhci_update_stream_segment_mapping(
 		struct radix_tree_root *trb_address_map,
 		struct xhci_ring *ring,
 		struct xhci_segment *first_seg,
-		struct xhci_segment *last_seg,
 		gfp_t mem_flags)
 {
 	struct xhci_segment *seg;
@@ -234,28 +234,22 @@ static int xhci_update_stream_segment_mapping(
 	if (WARN_ON_ONCE(trb_address_map == NULL))
 		return 0;
 
-	seg = first_seg;
-	do {
+	xhci_for_each_ring_seg(first_seg, seg) {
 		ret = xhci_insert_segment_mapping(trb_address_map,
 						  ring, seg, mem_flags);
 		if (ret)
 			goto remove_streams;
-		if (seg == last_seg)
-			return 0;
-		seg = seg->next;
-	} while (seg != first_seg);
+	}
 
 	return 0;
 
remove_streams:
 	failed_seg = seg;
-	seg = first_seg;
-	do {
+	xhci_for_each_ring_seg(first_seg, seg) {
 		xhci_remove_segment_mapping(trb_address_map, seg);
 		if (seg == failed_seg)
 			return ret;
-		seg = seg->next;
-	} while (seg != first_seg);
+	}
 
 	return ret;
 }
@@ -267,17 +261,14 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring)
 	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
 		return;
 
-	seg = ring->first_seg;
-	do {
+	xhci_for_each_ring_seg(ring->first_seg, seg)
 		xhci_remove_segment_mapping(ring->trb_address_map, seg);
-		seg = seg->next;
-	} while (seg != ring->first_seg);
 }
 
 static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
 {
 	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
-			ring->first_seg, ring->last_seg, mem_flags);
+			ring->first_seg, mem_flags);
 }
 
 /* XXX: Do we need the hcd structure in all these functions? */
@@ -297,8 +288,7 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	kfree(ring);
 }
 
-void xhci_initialize_ring_info(struct xhci_ring *ring,
-			       unsigned int cycle_state)
+void xhci_initialize_ring_info(struct xhci_ring *ring)
 {
 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 	ring->enqueue = ring->first_seg->trbs;
@@ -312,7 +302,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
 	 * New rings are initialized with cycle state equal to 1; if we are
 	 * handling ring expansion, set the cycle state equal to the old ring.
 	 */
-	ring->cycle_state = cycle_state;
+	ring->cycle_state = 1;
 
 	/*
 	 * Each segment has a link TRB, and leave an extra TRB for SW
@@ -327,7 +317,6 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 		struct xhci_segment **first,
 		struct xhci_segment **last,
 		unsigned int num_segs,
-		unsigned int cycle_state,
 		enum xhci_ring_type type,
 		unsigned int max_packet,
 		gfp_t flags)
@@ -338,7 +327,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 
 	chain_links = xhci_link_chain_quirk(xhci, type);
 
-	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
+	prev = xhci_segment_alloc(xhci, max_packet, num, flags);
 	if (!prev)
 		return -ENOMEM;
 	num++;
@@ -347,8 +336,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 	while (num < num_segs) {
 		struct xhci_segment	*next;
 
-		next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
-					  flags);
+		next = xhci_segment_alloc(xhci, max_packet, num, flags);
 		if (!next)
 			goto free_segments;
 
@@ -373,9 +361,8 @@ free_segments:
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_ring	*ring;
 	int ret;
@@ -393,7 +380,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return ring;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs,
-			cycle_state, type, max_packet, flags);
+			type, max_packet, flags);
 	if (ret)
 		goto fail;
 
@@ -403,7 +390,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
 			cpu_to_le32(LINK_TOGGLE);
 	}
-	xhci_initialize_ring_info(ring, cycle_state);
+	xhci_initialize_ring_info(ring);
 	trace_xhci_ring_alloc(ring);
 	return ring;
@@ -431,14 +418,14 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	struct xhci_segment	*last;
 	int			ret;
 
-	ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state,
-					   ring->type, ring->bounce_buf_len, flags);
+	ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type,
+					   ring->bounce_buf_len, flags);
 	if (ret)
 		return -ENOMEM;
 
 	if (ring->type == TYPE_STREAM) {
 		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
-							 ring, first, last, flags);
+							 ring, first, flags);
 		if (ret)
 			goto free_segments;
 	}
@@ -642,8 +629,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
-					mem_flags);
+			xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -984,7 +970,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -1467,7 +1453,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 
 	/* Set up the endpoint ring */
 	virt_dev->eps[ep_index].new_ring =
-		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+		xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring)
 		return -ENOMEM;
 
@@ -2276,7 +2262,7 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
 	if (!ir)
 		return NULL;
 
-	ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
+	ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
 	if (!ir->event_ring) {
 		xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
 		kfree(ir);
@@ -2482,7 +2468,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d5bcd5475b72..3970ec831b8c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -41,15 +41,15 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
 
 static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
 {
-	struct xhci_segment *seg = ring->first_seg;
+	struct xhci_segment *seg;
 
 	if (!td || !td->start_seg)
 		return false;
-	do {
+
+	xhci_for_each_ring_seg(ring->first_seg, seg) {
 		if (seg == td->start_seg)
 			return true;
-		seg = seg->next;
-	} while (seg && seg != ring->first_seg);
+	}
 
 	return false;
 }
@@ -764,16 +764,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 	struct xhci_segment *seg;
 
 	ring = xhci->cmd_ring;
-	seg = ring->deq_seg;
-	do {
-		memset(seg->trbs, 0,
-			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
-		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
-			cpu_to_le32(~TRB_CYCLE);
-		seg = seg->next;
-	} while (seg != ring->deq_seg);
-
-	xhci_initialize_ring_info(ring, 1);
+	xhci_for_each_ring_seg(ring->deq_seg, seg) {
+		memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+	}
+
+	xhci_initialize_ring_info(ring);
 	/*
 	 * Reset the hardware dequeue pointer.
 	 * Yes, this will need to be re-written after resume, but we're paranoid
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 67ee2e049943..b2aeb444daaf 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1263,6 +1263,9 @@ static inline const char *xhci_trb_type_string(u8 type)
 #define AVOID_BEI_INTERVAL_MIN	8
 #define AVOID_BEI_INTERVAL_MAX	32
 
+#define xhci_for_each_ring_seg(head, seg) \
+	for (seg = head; seg != NULL; seg = (seg->next != head ? seg->next : NULL))
+
 struct xhci_segment {
 	union xhci_trb		*trbs;
 	/* private to HCD */
@@ -1800,14 +1803,12 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
 int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 		struct usb_device *udev, struct usb_host_endpoint *ep,
 		gfp_t mem_flags);
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, unsigned int cycle_state,
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		unsigned int num_trbs, gfp_t flags);
-void xhci_initialize_ring_info(struct xhci_ring *ring,
-		unsigned int cycle_state);
+void xhci_initialize_ring_info(struct xhci_ring *ring);
 void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index);