Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/qaic/qaic.h | 2
-rw-r--r--  drivers/accel/qaic/qaic_control.c | 2
-rw-r--r--  drivers/accel/qaic/qaic_data.c | 12
-rw-r--r--  drivers/accel/qaic/qaic_debugfs.c | 5
-rw-r--r--  drivers/accel/qaic/qaic_drv.c | 3
-rw-r--r--  drivers/base/power/runtime.c | 44
-rw-r--r--  drivers/cdx/cdx_msi.c | 5
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 14
-rw-r--r--  drivers/dma/idxd/init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9211.c | 3
-rw-r--r--  drivers/gpu/drm/drm_draw.c | 2
-rw-r--r--  drivers/gpu/drm/drm_draw_internal.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 98
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 28
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 6
-rw-r--r--  drivers/gpu/drm/panthor/panthor_fw.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 13
-rw-r--r--  drivers/hid/hid-input.c | 5
-rw-r--r--  drivers/hid/hid-multitouch.c | 28
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 35
-rw-r--r--  drivers/md/md-linear.c | 1
-rw-r--r--  drivers/md/raid0.c | 16
-rw-r--r--  drivers/md/raid1.c | 37
-rw-r--r--  drivers/md/raid10.c | 55
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h | 2
-rw-r--r--  drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c | 2
-rw-r--r--  drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c | 225
-rw-r--r--  drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c | 2
-rw-r--r--  drivers/net/can/m_can/m_can.c | 84
-rw-r--r--  drivers/net/can/m_can/m_can.h | 1
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 2
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 23
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 5
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ipsec.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 194
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 5
-rw-r--r--  drivers/net/netdevsim/netdev.c | 7
-rw-r--r--  drivers/net/usb/lan78xx.c | 38
-rw-r--r--  drivers/net/usb/r8152.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtw89/core.c | 39
-rw-r--r--  drivers/net/wireless/realtek/rtw89/core.h | 6
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw89/pci.c | 2
-rw-r--r--  drivers/nvme/host/multipath.c | 6
-rw-r--r--  drivers/nvme/host/tcp.c | 3
-rw-r--r--  drivers/phy/cadence/cdns-dphy.c | 131
-rw-r--r--  drivers/usb/gadget/function/f_acm.c | 42
-rw-r--r--  drivers/usb/gadget/function/f_ecm.c | 48
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c | 78
-rw-r--r--  drivers/usb/gadget/function/f_rndis.c | 85
-rw-r--r--  drivers/usb/gadget/udc/core.c | 3
73 files changed, 1141 insertions(+), 598 deletions(-)
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 02561b6cecc6..2d7b3af09e28 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -91,6 +91,8 @@ struct dma_bridge_chan {
* response queue's head and tail pointer of this DBC.
*/
void __iomem *dbc_base;
+ /* Synchronizes access to Request queue's head and tail pointer */
+ struct mutex req_lock;
/* Head of list where each node is a memory handle queued in request queue */
struct list_head xfer_list;
/* Synchronizes DBC readers during cleanup */
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index d8bdab69f800..b86a8e48e731 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -407,7 +407,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
return -EINVAL;
remaining = in_trans->size - resources->xferred_dma_size;
if (remaining == 0)
- return 0;
+ return -EINVAL;
if (check_add_overflow(xfer_start_addr, remaining, &end))
return -EINVAL;
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 43aba57b48f0..265eeb4e156f 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -1357,13 +1357,17 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
goto release_ch_rcu;
}
+ ret = mutex_lock_interruptible(&dbc->req_lock);
+ if (ret)
+ goto release_ch_rcu;
+
head = readl(dbc->dbc_base + REQHP_OFF);
tail = readl(dbc->dbc_base + REQTP_OFF);
if (head == U32_MAX || tail == U32_MAX) {
/* PCI link error */
ret = -ENODEV;
- goto release_ch_rcu;
+ goto unlock_req_lock;
}
queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
@@ -1371,11 +1375,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
head, &tail);
if (ret)
- goto release_ch_rcu;
+ goto unlock_req_lock;
/* Finalize commit to hardware */
submit_ts = ktime_get_ns();
writel(tail, dbc->dbc_base + REQTP_OFF);
+ mutex_unlock(&dbc->req_lock);
update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
submit_ts, queue_level);
@@ -1383,6 +1388,9 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
if (datapath_polling)
schedule_work(&dbc->poll_work);
+unlock_req_lock:
+ if (ret)
+ mutex_unlock(&dbc->req_lock);
release_ch_rcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index 20b653d99e52..5ed49daaf541 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -251,6 +251,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
if (ret)
goto destroy_workqueue;
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->bootlog_ch = mhi_dev;
+
for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
if (!msg) {
@@ -266,8 +269,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
goto mhi_unprepare;
}
- dev_set_drvdata(&mhi_dev->dev, qdev);
- qdev->bootlog_ch = mhi_dev;
return 0;
mhi_unprepare:
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 10e711c96a67..cb606c4bb851 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -422,6 +422,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return NULL;
init_waitqueue_head(&qdev->dbc[i].dbc_release);
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
+ ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
+ if (ret)
+ return NULL;
}
return qdev;
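
Note: the new req_lock closes a race on the DBC request queue's doorbell registers: two concurrent submitters could read the same tail, build overlapping requests, and clobber each other's REQTP write. A minimal sketch of the locking pattern, simplified from the hunks above (queue_one() is a hypothetical helper, not the driver's actual code):

static int qaic_submit_locked(struct dma_bridge_chan *dbc)
{
	u32 head, tail;
	int ret;

	ret = mutex_lock_interruptible(&dbc->req_lock);
	if (ret)
		return ret;		/* interrupted by a signal */

	head = readl(dbc->dbc_base + REQHP_OFF);
	tail = readl(dbc->dbc_base + REQTP_OFF);
	if (head == U32_MAX || tail == U32_MAX) {
		mutex_unlock(&dbc->req_lock);
		return -ENODEV;		/* PCI link error */
	}

	tail = queue_one(dbc, tail);	/* hypothetical: append one request */
	writel(tail, dbc->dbc_base + REQTP_OFF);	/* commit under the lock */
	mutex_unlock(&dbc->req_lock);
	return 0;
}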
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index c7ec69597a95..d8aaa5b61628 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1554,6 +1554,32 @@ out:
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_set_suspended_action(void *data)
+{
+ pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+ int err;
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+ if (err)
+ return err;
+
+ return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
@@ -1576,6 +1602,24 @@ int devm_pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+static void pm_runtime_put_noidle_action(void *data)
+{
+ pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
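
Note: together the two new devres helpers let a driver set up runtime PM entirely in probe with no explicit unwind path. A plausible usage sketch (hypothetical driver; both calls are the functions added above):

static int foo_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Mark active and enable runtime PM; devres reverses both on unbind. */
	ret = devm_pm_runtime_set_active_enabled(dev);
	if (ret)
		return ret;

	/*
	 * Raise the usage count for the driver's lifetime; the matching
	 * pm_runtime_put_noidle() runs automatically at unbind.
	 */
	return devm_pm_runtime_get_noresume(dev);
}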
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index e55f1716cfcb..d7bade143998 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -165,7 +165,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
struct device_node *parent_node;
struct irq_domain *parent;
- fwnode_handle = of_node_to_fwnode(np);
+ fwnode_handle = of_fwnode_handle(np);
parent_node = of_parse_phandle(np, "msi-map", 1);
if (!parent_node) {
@@ -173,7 +173,8 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
return NULL;
}
- parent = irq_find_matching_fwnode(of_node_to_fwnode(parent_node), DOMAIN_BUS_NEXUS);
+ parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
+ of_node_put(parent_node);
if (!parent || !msi_get_domain_info(parent)) {
dev_err(dev, "unable to locate ITS domain\n");
return NULL;
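
Note: the added of_node_put() follows the standard OF refcount rule: of_parse_phandle() returns a device_node with its refcount elevated, and the caller must drop it once the node is no longer needed. The general shape, as the fixed code now does:

struct device_node *parent_node;
struct irq_domain *parent;

parent_node = of_parse_phandle(np, "msi-map", 1);
if (!parent_node)
	return NULL;

parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
of_node_put(parent_node);	/* drop the reference of_parse_phandle() took */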
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8d5279c21e6c..1abedcae50b2 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -339,6 +339,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
+static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ unsigned int transition_latency_ns = cppc_get_transition_latency(cpu);
+
+ if (transition_latency_ns == CPUFREQ_ETERNAL)
+ return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
+
+ return transition_latency_ns / NSEC_PER_USEC;
+}
+
/*
* The PCC subspace describes the rate at which platform can accept commands
* on the shared PCC channel (including READs which do not count towards freq
@@ -361,12 +371,12 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
return 10000;
}
}
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#endif
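
Note: the new helper guards against cppc_get_transition_latency() returning CPUFREQ_ETERNAL, the "latency unknown" sentinel (defined as -1). Divided blindly, that sentinel yields an absurd transition delay; a quick illustration, assuming the usual definitions:

/* CPUFREQ_ETERNAL is (-1); as an unsigned latency that is ~4.29e9 ns. */
unsigned int lat_ns = CPUFREQ_ETERNAL;
unsigned int delay_us = lat_ns / NSEC_PER_USEC;	/* ~4294967 us, i.e. ~4.3 s */
/* The helper instead falls back to
 * CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC. */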
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 74a83203181d..e55136bb525e 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -923,6 +923,8 @@ static void idxd_remove(struct pci_dev *pdev)
idxd_cleanup_interrupts(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
+ if (device_user_pasid_enabled(idxd))
+ idxd_disable_sva(idxd->pdev);
pci_iounmap(pdev, idxd->reg_base);
put_device(idxd_confdev(idxd));
pci_disable_device(pdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7b18c52825d..784651269ec5 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -83,7 +83,8 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
+ cyan_skillfish_reg_init.o
# add DF block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1465b3adacb0..d349a4816e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2353,10 +2353,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *mem)
{
- if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
- mb(); /* make sure read happened */
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
}
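
Note: this conversion (and the matching gmc_v7_0/gmc_v8_0 hunks below) replaces plain atomics plus an explicit mb() with an acquire/release pair: the writer publishes the fault record with atomic_set_release() so all earlier stores become visible first, and the reader consumes it with atomic_read_acquire() so later loads cannot be hoisted above the flag check. The generic pattern, with hypothetical names:

static atomic_t updated = ATOMIC_INIT(0);

/* Producer: fill the record, then publish. */
record->addr = fault_addr;
record->vmid = vmid;
atomic_set_release(&updated, 1);	/* orders the stores above before the flag */

/* Consumer: observe the flag, then read the record. */
if (atomic_read_acquire(&updated) == 1) {
	copy = *record;			/* guaranteed to see the producer's stores */
	atomic_set_release(&updated, 0);
}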
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 6042956cd5c3..e00b5e454234 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1016,7 +1016,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
/* Until a uniform way is figured, get mask based on hwid */
switch (hw_id) {
case VCN_HWID:
- harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ /* VCN vs UVD+VCE */
+ if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
break;
case DMU_HWID:
if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
@@ -2462,7 +2464,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
@@ -2489,7 +2493,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
@@ -2516,8 +2522,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
+ adev->sdma.sdma_mask = 1;
adev->vcn.num_vcn_inst = 1;
adev->gmc.num_umc = 2;
+ adev->gfx.xcc_mask = 1;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
@@ -2560,7 +2568,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
@@ -2588,8 +2598,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
+ adev->sdma.sdma_mask = 0xff;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
@@ -2621,8 +2633,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
+ adev->sdma.sdma_mask = 0x1f;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
@@ -2644,6 +2658,38 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ } else {
+ cyan_skillfish_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
+ adev->gfx.xcc_mask = 1;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
+ }
+ break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8553ac4c0ad3..a8358d1d1acb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2171,7 +2171,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
- if (!ret) {
+ if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
} else
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
new file mode 100644
index 000000000000..96616a865aac
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "cyan_skillfish_ip_offset.h"
+
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks; only initialize the blocks needed by the driver */
+ uint32_t i;
+
+ adev->gfx.xcc_mask = 1;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 994432fb57ea..8e2f73125650 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1055,7 +1055,7 @@ static int gmc_v7_0_sw_init(void *handle)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1287,7 +1287,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1303,8 +1303,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 86488c052f82..5248832c04ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1168,7 +1168,7 @@ static int gmc_v8_0_sw_init(void *handle)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1468,7 +1468,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1484,8 +1484,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index e3f4f5fbbd6e..19cbf80fa321 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -224,7 +224,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
pipe, x_pkt->header.opcode);
r = amdgpu_fence_wait_polling(ring, seq, timeout);
- if (r < 1 || !*status_ptr) {
+
+ /*
+ * status_ptr[31:0] == 0 (fail) or status_ptr[31:0] == 1 (success).
+ * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information.
+ */
+ if (r < 1 || !(lower_32_bits(*status_ptr))) {
if (misc_op_str)
dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
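
Note: per the new comment, the 64-bit MES status word packs a pass/fail flag in the low half and debug data in the high half. A hedged sketch of decoding it with the standard kernel helpers (surrounding names assumed from the hunk):

u64 status = *status_ptr;

if (!lower_32_bits(status))		/* 0 == fail, 1 == success */
	dev_err(adev->dev, "MES failed, debug info 0x%08x\n",
		upper_32_bits(status));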
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 83e9782aef39..8f4817404f10 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block;
void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void nv_set_virt_ops(struct amdgpu_device *adev);
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 632a25957477..3018e294673a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
else if (hwmgr->pp_table_version == PP_TABLE_V0)
- thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->max = data->thermal_temp_setting.temperature_shutdown;
thermal_data->sw_ctf_threshold = thermal_data->max;
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index c8881796fba4..4014375f06ea 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -120,8 +120,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx)
}
/* Test for known Chip ID. */
- if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE ||
- chipid[2] != REG_CHIPID2_VALUE) {
+ if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) {
dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",
chipid[0], chipid[1], chipid[2]);
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index d41f8ae1c148..b61ebc5bdd5c 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -125,7 +125,7 @@ EXPORT_SYMBOL(drm_draw_fill16);
void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
- u16 color)
+ u32 color)
{
unsigned int y, x;
diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
index f121ee7339dc..20cb404e23ea 100644
--- a/drivers/gpu/drm/drm_draw_internal.h
+++ b/drivers/gpu/drm/drm_draw_internal.h
@@ -47,7 +47,7 @@ void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
- u16 color);
+ u32 color);
void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 9eeba254cf45..2a218d205842 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -51,7 +51,6 @@ struct decon_context {
void __iomem *regs;
unsigned long irq_flags;
bool i80_if;
- bool suspended;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -81,13 +80,30 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
DRM_PLANE_TYPE_CURSOR,
};
-static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
+/**
+ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
+ *
+ * @ctx: display and enhancement controller context
+ * @win: window to protect registers for
+ * @protect: 1 to protect (disable updates)
+ */
+static void decon_shadow_protect_win(struct decon_context *ctx,
+ unsigned int win, bool protect)
{
- struct decon_context *ctx = crtc->ctx;
+ u32 bits, val;
- if (ctx->suspended)
- return;
+ bits = SHADOWCON_WINx_PROTECT(win);
+
+ val = readl(ctx->regs + SHADOWCON);
+ if (protect)
+ val |= bits;
+ else
+ val &= ~bits;
+ writel(val, ctx->regs + SHADOWCON);
+}
+static void decon_wait_for_vblank(struct decon_context *ctx)
+{
atomic_set(&ctx->wait_vsync_event, 1);
/*
@@ -100,25 +116,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
-static void decon_clear_channels(struct exynos_drm_crtc *crtc)
+static void decon_clear_channels(struct decon_context *ctx)
{
- struct decon_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
+ u32 val;
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
- u32 val = readl(ctx->regs + WINCON(win));
+ val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
+ decon_shadow_protect_win(ctx, win, true);
+
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
ch_enabled = 1;
+
+ decon_shadow_protect_win(ctx, win, false);
}
}
+ val = readl(ctx->regs + DECON_UPDATE);
+ val |= DECON_UPDATE_STANDALONE_F;
+ writel(val, ctx->regs + DECON_UPDATE);
+
/* Wait for vsync, as disable channel takes effect at next vsync */
if (ch_enabled)
- decon_wait_for_vblank(ctx->crtc);
+ decon_wait_for_vblank(ctx);
}
static int decon_ctx_initialize(struct decon_context *ctx,
@@ -126,7 +150,7 @@ static int decon_ctx_initialize(struct decon_context *ctx,
{
ctx->drm_dev = drm_dev;
- decon_clear_channels(ctx->crtc);
+ decon_clear_channels(ctx);
return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}
@@ -155,9 +179,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
u32 val, clkdiv;
- if (ctx->suspended)
- return;
-
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
@@ -219,9 +240,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return -EPERM;
-
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -244,9 +262,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return;
-
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -343,36 +358,11 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
-/**
- * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
- *
- * @ctx: display and enhancement controller context
- * @win: window to protect registers for
- * @protect: 1 to protect (disable updates)
- */
-static void decon_shadow_protect_win(struct decon_context *ctx,
- unsigned int win, bool protect)
-{
- u32 bits, val;
-
- bits = SHADOWCON_WINx_PROTECT(win);
-
- val = readl(ctx->regs + SHADOWCON);
- if (protect)
- val |= bits;
- else
- val &= ~bits;
- writel(val, ctx->regs + SHADOWCON);
-}
-
static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, true);
}
@@ -392,9 +382,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
- if (ctx->suspended)
- return;
-
/*
* SHADOWCON/PRTCON register is used for enabling timing.
*
@@ -482,9 +469,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
unsigned int win = plane->index;
u32 val;
- if (ctx->suspended)
- return;
-
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
@@ -503,9 +487,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
exynos_crtc_handle_event(crtc);
@@ -533,9 +514,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int ret;
- if (!ctx->suspended)
- return;
-
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
@@ -549,8 +527,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
-
- ctx->suspended = false;
}
static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
@@ -558,9 +534,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
/*
* We need to make sure that all windows are disabled before we
* suspend that connector. Otherwise we might try to scan from
@@ -570,8 +543,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
decon_disable_plane(crtc, &ctx->planes[i]);
pm_runtime_put_sync(ctx->dev);
-
- ctx->suspended = true;
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
@@ -692,7 +663,6 @@ static int decon_probe(struct platform_device *pdev)
return -ENOMEM;
ctx->dev = dev;
- ctx->suspended = true;
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
if (i80_if_timings)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 0d5197c0824a..5cf3a516ccfb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -1324,9 +1324,16 @@ static int ct_receive(struct intel_guc_ct *ct)
static void ct_try_receive_message(struct intel_guc_ct *ct)
{
+ struct intel_guc *guc = ct_to_guc(ct);
int ret;
- if (GEM_WARN_ON(!ct->enabled))
+ if (!ct->enabled) {
+ GEM_WARN_ON(!guc_to_gt(guc)->uc.reset_in_progress);
+ return;
+ }
+
+ /* When interrupts are disabled, message handling is not expected */
+ if (!guc->interrupts.enabled)
return;
ret = ct_receive(ct);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 67fa528f546d..8609fa38058e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -236,6 +236,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
if (ret)
DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+ set_bit(GMU_STATUS_FW_START, &gmu->status);
+
return ret;
}
@@ -482,6 +484,9 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
+ return 0;
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
@@ -509,6 +514,9 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
+ return;
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
@@ -517,6 +525,8 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
}
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
@@ -645,8 +655,6 @@ setup_pdc:
/* ensure no writes happen before the uCode is fully written */
wmb();
- a6xx_rpmh_stop(gmu);
-
err:
if (!IS_ERR_OR_NULL(pdcptr))
iounmap(pdcptr);
@@ -799,19 +807,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
else
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
- if (state == GMU_WARM_BOOT) {
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
- } else {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+
+ if (state == GMU_COLD_BOOT) {
if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
"GMU firmware is not loaded\n"))
return -ENOENT;
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
-
ret = a6xx_gmu_fw_load(gmu);
if (ret)
return ret;
@@ -980,6 +984,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Reset GPU core blocks */
a6xx_gpu_sw_reset(gpu, true);
+
+ a6xx_rpmh_stop(gmu);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 94b6c5cab6f4..db5b3b13e743 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -99,6 +99,12 @@ struct a6xx_gmu {
struct completion pd_gate;
struct qmp *qmp;
+
+/* To check if we can trigger sleep seq at PDC. Cleared in a6xx_rpmh_stop() */
+#define GMU_STATUS_FW_START 0
+/* To track if PDC sleep seq was done */
+#define GMU_STATUS_PDC_SLEEP 1
+ unsigned long status;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
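
Note: the two status bits make the RPMh start/stop sequences idempotent: each runs only if its counterpart actually happened, and test_and_clear_bit() consumes the token atomically. Roughly, as used in the a6xx_gmu.c hunks above:

/* Wake only if a PDC sleep sequence preceded this call. */
if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
	return 0;			/* nothing to undo */
/* ... run the wake sequence ... */

/* Skip the sleep sequence right after a fresh firmware start. */
if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
	return;
/* ... run the sleep sequence, then ... */
set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);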
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 4e2d3a02ea06..cdd6e1c08ceb 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -1057,6 +1057,7 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
}
panthor_job_irq_suspend(&ptdev->fw->irq);
+ panthor_fw_stop(ptdev);
}
/**
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 5d7df4c3b08c..a551458ad434 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -1118,7 +1118,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
return format;
if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 ||
- drm_rect_width(dest) < 4 || drm_rect_width(dest) < 4) {
+ drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) {
drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",
drm_rect_width(src) >> 16, drm_rect_height(src) >> 16,
drm_rect_width(dest), drm_rect_height(dest));
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 416590ea0dc3..d5260cb1ed0e 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -952,13 +952,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
dma_resv_assert_held(resv);
dma_resv_for_each_fence(&cursor, resv, usage, fence) {
- /* Make sure to grab an additional ref on the added fence */
- dma_fence_get(fence);
- ret = drm_sched_job_add_dependency(job, fence);
- if (ret) {
- dma_fence_put(fence);
+ /*
+ * As drm_sched_job_add_dependency always consumes the fence
+ * reference (even when it fails), and dma_resv_for_each_fence
+ * is not obtaining one, we need to grab one before calling.
+ */
+ ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
+ if (ret)
return ret;
- }
}
return 0;
}
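
Note: the rewritten comment states the contract that makes the extra dma_fence_get() necessary: drm_sched_job_add_dependency() consumes one fence reference whether it succeeds or fails, while dma_resv_for_each_fence() only lends references it still owns. Any caller feeding a borrowed fence to a reference-consuming callee must take its own reference first:

/* 'fence' is borrowed from the iterator; the callee always consumes
 * one reference, so take our own before handing it over. */
ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
if (ret)
	return ret;	/* no dma_fence_put() needed: the callee dropped it */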
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index cf6946424fc3..03d674e9e807 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -41,6 +41,7 @@
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_trace.h"
+#include "xe_uc_fw.h"
#include "xe_vm.h"
static struct xe_guc *
@@ -1285,7 +1286,17 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
trace_xe_exec_queue_cleanup_entity(q);
- if (exec_queue_registered(q))
+ /*
+ * Expected state transitions for cleanup:
+ * - If the exec queue is registered and GuC firmware is running, we must first
+ * disable scheduling and deregister the queue to ensure proper teardown and
+ * resource release in the GuC, then destroy the exec queue on driver side.
+ * - If the GuC is already stopped (e.g., during driver unload or GPU reset),
+ * we cannot expect a response for the deregister request. In this case,
+ * it is safe to directly destroy the exec queue on driver side, as the GuC
+ * will not process further requests and all resources must be cleaned up locally.
+ */
+ if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw))
disable_scheduling_deregister(guc, q);
else
__guc_exec_queue_fini(guc, q);
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index f5c217ac4bfa..f073d5621050 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -622,7 +622,10 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
return;
}
- if (value == 0 || value < dev->battery_min || value > dev->battery_max)
+ if ((usage & HID_USAGE_PAGE) == HID_UP_DIGITIZER && value == 0)
+ return;
+
+ if (value < dev->battery_min || value > dev->battery_max)
return;
capacity = hidinput_scale_battery_capacity(dev, value);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5c424010bc02..0e4cb0e668eb 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -83,9 +83,8 @@ enum latency_mode {
HID_LATENCY_HIGH = 1,
};
-#define MT_IO_FLAGS_RUNNING 0
-#define MT_IO_FLAGS_ACTIVE_SLOTS 1
-#define MT_IO_FLAGS_PENDING_SLOTS 2
+#define MT_IO_SLOTS_MASK GENMASK(7, 0) /* reserve first 8 bits for slot tracking */
+#define MT_IO_FLAGS_RUNNING 32
static const bool mtrue = true; /* default for true */
static const bool mfalse; /* default for false */
@@ -160,7 +159,11 @@ struct mt_device {
struct mt_class mtclass; /* our mt device class */
struct timer_list release_timer; /* to release sticky fingers */
struct hid_device *hdev; /* hid_device we're attached to */
- unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
+ unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_RUNNING)
+ * first 8 bits are reserved for keeping the slot
+ * states, this is fine because we only support up
+ * to 250 slots (MT_MAX_MAXCONTACT)
+ */
__u8 inputmode_value; /* InputMode HID feature value */
__u8 maxcontacts;
bool is_buttonpad; /* is this device a button pad? */
@@ -941,6 +944,7 @@ static void mt_release_pending_palms(struct mt_device *td,
for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) {
clear_bit(slotnum, app->pending_palm_slots);
+ clear_bit(slotnum, &td->mt_io_flags);
input_mt_slot(input, slotnum);
input_mt_report_slot_inactive(input);
@@ -972,12 +976,6 @@ static void mt_sync_frame(struct mt_device *td, struct mt_application *app,
app->num_received = 0;
app->left_button_state = 0;
-
- if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
- set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
- else
- clear_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
- clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
}
static int mt_compute_timestamp(struct mt_application *app, __s32 value)
@@ -1152,7 +1150,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
- set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
+ set_bit(slotnum, &td->mt_io_flags);
+ } else {
+ clear_bit(slotnum, &td->mt_io_flags);
}
return 0;
@@ -1287,7 +1287,7 @@ static void mt_touch_report(struct hid_device *hid,
* defect.
*/
if (app->quirks & MT_QUIRK_STICKY_FINGERS) {
- if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
+ if (td->mt_io_flags & MT_IO_SLOTS_MASK)
mod_timer(&td->release_timer,
jiffies + msecs_to_jiffies(100));
else
@@ -1663,6 +1663,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
case HID_CP_CONSUMER_CONTROL:
case HID_GD_WIRELESS_RADIO_CTLS:
case HID_GD_SYSTEM_MULTIAXIS:
+ case HID_DG_PEN:
/* already handled by hid core */
break;
case HID_DG_TOUCHSCREEN:
@@ -1734,6 +1735,7 @@ static void mt_release_contacts(struct hid_device *hid)
for (i = 0; i < mt->num_slots; i++) {
input_mt_slot(input_dev, i);
input_mt_report_slot_inactive(input_dev);
+ clear_bit(i, &td->mt_io_flags);
}
input_mt_sync_frame(input_dev);
input_sync(input_dev);
@@ -1756,7 +1758,7 @@ static void mt_expired_timeout(struct timer_list *t)
*/
if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
- if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
+ if (td->mt_io_flags & MT_IO_SLOTS_MASK)
mt_release_contacts(hdev);
clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
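
Note: after this rework mt_io_flags doubles as a per-slot bitmap: the low bits record which contact slots are currently active, and MT_IO_SLOTS_MASK lets the sticky-finger timer ask "is anything still down?" in a single test. The recurring pattern:

/* Per-slot bookkeeping on each report. */
if (contact_active)
	set_bit(slotnum, &td->mt_io_flags);
else
	clear_bit(slotnum, &td->mt_io_flags);

/* Arm the release timer only while some slot is still live. */
if (td->mt_io_flags & MT_IO_SLOTS_MASK)
	mod_timer(&td->release_timer, jiffies + msecs_to_jiffies(100));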
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 790bc8fbf21d..9f88d8ca6d1b 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -667,20 +667,12 @@ static void inv_icm42600_disable_vdd_reg(void *_data)
static void inv_icm42600_disable_vddio_reg(void *_data)
{
struct inv_icm42600_state *st = _data;
- const struct device *dev = regmap_get_device(st->map);
- int ret;
-
- ret = regulator_disable(st->vddio_supply);
- if (ret)
- dev_err(dev, "failed to disable vddio error %d\n", ret);
-}
+ struct device *dev = regmap_get_device(st->map);
-static void inv_icm42600_disable_pm(void *_data)
-{
- struct device *dev = _data;
+ if (pm_runtime_status_suspended(dev))
+ return;
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
+ regulator_disable(st->vddio_supply);
}
int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
@@ -777,16 +769,14 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
return ret;
/* setup runtime power management */
- ret = pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_set_active_enabled(dev);
if (ret)
return ret;
- pm_runtime_get_noresume(dev);
- pm_runtime_enable(dev);
+
pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
- pm_runtime_put(dev);
- return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
+ return ret;
}
EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);
@@ -797,17 +787,15 @@ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);
static int inv_icm42600_suspend(struct device *dev)
{
struct inv_icm42600_state *st = dev_get_drvdata(dev);
- int ret;
+ int ret = 0;
mutex_lock(&st->lock);
st->suspended.gyro = st->conf.gyro.mode;
st->suspended.accel = st->conf.accel.mode;
st->suspended.temp = st->conf.temp_en;
- if (pm_runtime_suspended(dev)) {
- ret = 0;
+ if (pm_runtime_suspended(dev))
goto out_unlock;
- }
/* disable FIFO data streaming */
if (st->fifo.on) {
@@ -839,10 +827,13 @@ static int inv_icm42600_resume(struct device *dev)
struct inv_icm42600_state *st = dev_get_drvdata(dev);
struct inv_icm42600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
- int ret;
+ int ret = 0;
mutex_lock(&st->lock);
+ if (pm_runtime_suspended(dev))
+ goto out_unlock;
+
ret = inv_icm42600_enable_regulator_vddio(st);
if (ret)
goto out_unlock;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 369aed044b40..d733ebee624b 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -267,6 +267,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
}
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
bio = split;
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 31bea72bcb01..db1ab214250f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -464,7 +464,15 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
struct bio *split = bio_split(bio,
zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
&mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return;
+ }
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
bio = split;
end = zone->zone_end;
@@ -606,7 +614,15 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
if (sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, sectors, GFP_NOIO,
&mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return true;
+ }
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
raid0_map_submit_bio(mddev, bio);
bio = split;
}
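
Note: bio_split() can fail and returns an ERR_PTR rather than NULL, so every split site in the md hunks gains the same three-step recovery: convert the errno to a blk_status_t, complete the original bio, and bail out. The shared pattern (this is the shape used in raid0 above and in the raid1/raid10 hunks below):

struct bio *split = bio_split(bio, sectors, GFP_NOIO, &mddev->bio_set);

if (IS_ERR(split)) {
	bio->bi_status = errno_to_blk_status(PTR_ERR(split));
	bio_endio(bio);			/* fail the original request */
	return;
}

bio_chain(split, bio);			/* tie the completions together */
trace_block_split(split, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);			/* resubmit the remainder */
bio = split;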
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index faccf7344ef9..4c6b1bd6da9b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1317,7 +1317,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct raid1_info *mirror;
struct bio *read_bio;
int max_sectors;
- int rdisk;
+ int rdisk, error;
bool r1bio_existed = !!r1_bio;
/*
@@ -1378,7 +1378,14 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
+
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
bio = split;
r1_bio->master_bio = bio;
@@ -1404,6 +1411,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r1_bio;
mddev_trace_remap(mddev, read_bio, r1_bio->sector);
submit_bio_noacct(read_bio);
+ return;
+
+err_handle:
+ atomic_dec(&mirror->rdev->nr_pending);
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ raid_end_bio_io(r1_bio);
}
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1411,7 +1425,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- int i, disks;
+ int i, disks, k, error;
unsigned long flags;
struct md_rdev *blocked_rdev;
int first_clone;
@@ -1557,7 +1571,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
GFP_NOIO, &conf->bio_split);
+
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
bio = split;
r1_bio->master_bio = bio;
@@ -1640,6 +1661,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
/* In case raid1d snuck in to freeze_array */
wake_up_barrier(conf);
+ return;
+err_handle:
+ for (k = 0; k < i; k++) {
+ if (r1_bio->bios[k]) {
+ rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+ r1_bio->bios[k] = NULL;
+ }
+ }
+
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ raid_end_bio_io(r1_bio);
}
static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6579bbb6a39a..b0062ad9b1d9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1153,6 +1153,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
int slot = r10_bio->read_slot;
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
+ int error;
if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
@@ -1203,7 +1204,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
submit_bio_noacct(bio);
wait_barrier(conf, false);
@@ -1233,6 +1240,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
mddev_trace_remap(mddev, read_bio, r10_bio->sector);
submit_bio_noacct(read_bio);
return;
+err_handle:
+ atomic_dec(&rdev->nr_pending);
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+ raid_end_bio_io(r10_bio);
}
static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
@@ -1341,9 +1353,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
- int i;
+ int i, k;
sector_t sectors;
int max_sectors;
+ int error;
if ((mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1469,7 +1482,13 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
if (r10_bio->sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, r10_bio->sectors,
GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
submit_bio_noacct(bio);
wait_barrier(conf, false);
@@ -1488,6 +1507,26 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
raid10_write_one_disk(mddev, r10_bio, bio, true, i);
}
one_write_done(r10_bio);
+ return;
+err_handle:
+ for (k = 0; k < i; k++) {
+ int d = r10_bio->devs[k].devnum;
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ struct md_rdev *rrdev = conf->mirrors[d].replacement;
+
+ if (r10_bio->devs[k].bio) {
+ rdev_dec_pending(rdev, mddev);
+ r10_bio->devs[k].bio = NULL;
+ }
+ if (r10_bio->devs[k].repl_bio) {
+ rdev_dec_pending(rrdev, mddev);
+ r10_bio->devs[k].repl_bio = NULL;
+ }
+ }
+
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+ raid_end_bio_io(r10_bio);
}
static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
@@ -1629,7 +1668,14 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (remainder) {
split_size = stripe_size - remainder;
split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return 0;
+ }
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
/* Resend the first split part */
submit_bio_noacct(split);
@@ -1639,7 +1685,14 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (remainder) {
split_size = bio_sectors(bio) - remainder;
split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return 0;
+ }
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
/* Resend the second split part */
submit_bio_noacct(bio);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 39e7596e78c0..4fae8ade2409 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5484,8 +5484,10 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
if (sectors < bio_sectors(raid_bio)) {
struct r5conf *conf = mddev->private;
+
split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
bio_chain(split, raid_bio);
+ trace_block_split(split, raid_bio->bi_iter.bi_sector);
submit_bio_noacct(raid_bio);
raid_bio = split;
}
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
index 2810ebe9b5f7..5a4676d52079 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
@@ -361,7 +361,7 @@ void mxc_isi_channel_get(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_put(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe);
-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass);
+int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_config(struct mxc_isi_pipe *pipe,
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
index 5623914f95e6..9225a7ac1c3e 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
@@ -587,7 +587,7 @@ void mxc_isi_channel_release(struct mxc_isi_pipe *pipe)
*
* TODO: Support secondary line buffer for downscaling YUV420 images.
*/
-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass)
+int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe)
{
/* Channel chaining requires both line and output buffer. */
const u8 resources = MXC_ISI_CHANNEL_RES_OUTPUT_BUF
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
index cd6c52e9d158..81223d28ee56 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
@@ -43,7 +43,6 @@ struct mxc_isi_m2m_ctx_queue_data {
struct v4l2_pix_format_mplane format;
const struct mxc_isi_format_info *info;
u32 sequence;
- bool streaming;
};
struct mxc_isi_m2m_ctx {
@@ -236,6 +235,65 @@ static void mxc_isi_m2m_vb2_buffer_queue(struct vb2_buffer *vb2)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
+ const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
+ const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
+ const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
+ struct mxc_isi_m2m *m2m = ctx->m2m;
+ int ret;
+
+ guard(mutex)(&m2m->lock);
+
+ if (m2m->usage_count == INT_MAX)
+ return -EOVERFLOW;
+
+ /*
+ * Acquire the pipe and initialize the channel with the first user of
+ * the M2M device.
+ */
+ if (m2m->usage_count == 0) {
+ bool bypass = cap_pix->width == out_pix->width &&
+ cap_pix->height == out_pix->height &&
+ cap_info->encoding == out_info->encoding;
+
+ ret = mxc_isi_channel_acquire(m2m->pipe,
+ &mxc_isi_m2m_frame_write_done,
+ bypass);
+ if (ret)
+ return ret;
+
+ mxc_isi_channel_get(m2m->pipe);
+ }
+
+ m2m->usage_count++;
+
+ /*
+ * Allocate resources for the channel, counting how many users require
+ * buffer chaining.
+ */
+ if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
+ ret = mxc_isi_channel_chain(m2m->pipe);
+ if (ret)
+ goto err_deinit;
+
+ m2m->chained_count++;
+ ctx->chained = true;
+ }
+
+ return 0;
+
+err_deinit:
+ if (--m2m->usage_count == 0) {
+ mxc_isi_channel_put(m2m->pipe);
+ mxc_isi_channel_release(m2m->pipe);
+ }
+
+ return ret;
+}
+
static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
unsigned int count)
{
@@ -265,6 +323,35 @@ static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q)
}
}
+static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct mxc_isi_m2m *m2m = ctx->m2m;
+
+ guard(mutex)(&m2m->lock);
+
+ /*
+ * If the last context is this one, reset it to make sure the device
+ * will be reconfigured when streaming is restarted.
+ */
+ if (m2m->last_ctx == ctx)
+ m2m->last_ctx = NULL;
+
+ /* Free the channel resources if this is the last chained context. */
+ if (ctx->chained && --m2m->chained_count == 0)
+ mxc_isi_channel_unchain(m2m->pipe);
+ ctx->chained = false;
+
+ /* Turn off the light with the last user. */
+ if (--m2m->usage_count == 0) {
+ mxc_isi_channel_disable(m2m->pipe);
+ mxc_isi_channel_put(m2m->pipe);
+ mxc_isi_channel_release(m2m->pipe);
+ }
+
+ WARN_ON(m2m->usage_count < 0);
+}
+
static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
.queue_setup = mxc_isi_m2m_vb2_queue_setup,
.buf_init = mxc_isi_m2m_vb2_buffer_init,
@@ -272,8 +359,10 @@ static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
.buf_queue = mxc_isi_m2m_vb2_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
+ .prepare_streaming = mxc_isi_m2m_vb2_prepare_streaming,
.start_streaming = mxc_isi_m2m_vb2_start_streaming,
.stop_streaming = mxc_isi_m2m_vb2_stop_streaming,
+ .unprepare_streaming = mxc_isi_m2m_vb2_unprepare_streaming,
};
static int mxc_isi_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
@@ -483,136 +572,6 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh,
return 0;
}
-static int mxc_isi_m2m_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
- const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
- const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
- const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
- const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
- struct mxc_isi_m2m *m2m = ctx->m2m;
- bool bypass;
- int ret;
-
- if (q->streaming)
- return 0;
-
- mutex_lock(&m2m->lock);
-
- if (m2m->usage_count == INT_MAX) {
- ret = -EOVERFLOW;
- goto unlock;
- }
-
- bypass = cap_pix->width == out_pix->width &&
- cap_pix->height == out_pix->height &&
- cap_info->encoding == out_info->encoding;
-
- /*
- * Acquire the pipe and initialize the channel with the first user of
- * the M2M device.
- */
- if (m2m->usage_count == 0) {
- ret = mxc_isi_channel_acquire(m2m->pipe,
- &mxc_isi_m2m_frame_write_done,
- bypass);
- if (ret)
- goto unlock;
-
- mxc_isi_channel_get(m2m->pipe);
- }
-
- m2m->usage_count++;
-
- /*
- * Allocate resources for the channel, counting how many users require
- * buffer chaining.
- */
- if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
- ret = mxc_isi_channel_chain(m2m->pipe, bypass);
- if (ret)
- goto deinit;
-
- m2m->chained_count++;
- ctx->chained = true;
- }
-
- /*
- * Drop the lock to start the stream, as the .device_run() operation
- * needs to acquire it.
- */
- mutex_unlock(&m2m->lock);
- ret = v4l2_m2m_ioctl_streamon(file, fh, type);
- if (ret) {
- /* Reacquire the lock for the cleanup path. */
- mutex_lock(&m2m->lock);
- goto unchain;
- }
-
- q->streaming = true;
-
- return 0;
-
-unchain:
- if (ctx->chained && --m2m->chained_count == 0)
- mxc_isi_channel_unchain(m2m->pipe);
- ctx->chained = false;
-
-deinit:
- if (--m2m->usage_count == 0) {
- mxc_isi_channel_put(m2m->pipe);
- mxc_isi_channel_release(m2m->pipe);
- }
-
-unlock:
- mutex_unlock(&m2m->lock);
- return ret;
-}
-
-static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
- struct mxc_isi_m2m *m2m = ctx->m2m;
-
- v4l2_m2m_ioctl_streamoff(file, fh, type);
-
- if (!q->streaming)
- return 0;
-
- mutex_lock(&m2m->lock);
-
- /*
- * If the last context is this one, reset it to make sure the device
- * will be reconfigured when streaming is restarted.
- */
- if (m2m->last_ctx == ctx)
- m2m->last_ctx = NULL;
-
- /* Free the channel resources if this is the last chained context. */
- if (ctx->chained && --m2m->chained_count == 0)
- mxc_isi_channel_unchain(m2m->pipe);
- ctx->chained = false;
-
- /* Turn off the light with the last user. */
- if (--m2m->usage_count == 0) {
- mxc_isi_channel_disable(m2m->pipe);
- mxc_isi_channel_put(m2m->pipe);
- mxc_isi_channel_release(m2m->pipe);
- }
-
- WARN_ON(m2m->usage_count < 0);
-
- mutex_unlock(&m2m->lock);
-
- q->streaming = false;
-
- return 0;
-}
-
static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
.vidioc_querycap = mxc_isi_m2m_querycap,
@@ -633,8 +592,8 @@ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
- .vidioc_streamon = mxc_isi_m2m_streamon,
- .vidioc_streamoff = mxc_isi_m2m_streamoff,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
index d76eb58deb09..a41c51dd9ce0 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
@@ -855,7 +855,7 @@ int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe,
/* Chain the channel if needed for wide resolutions. */
if (sink_fmt->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
- ret = mxc_isi_channel_chain(pipe, bypass);
+ ret = mxc_isi_channel_chain(pipe);
if (ret)
mxc_isi_channel_release(pipe);
}
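
The new prepare/unprepare handlers rely on scope-based locking from <linux/cleanup.h>, which is why none of their return paths carry an explicit unlock. A hedged sketch of the guard(mutex) idiom with illustrative names:

#include <linux/cleanup.h>
#include <linux/limits.h>
#include <linux/mutex.h>

struct m2m_like {
	struct mutex lock;
	int usage_count;
};

static int usage_get(struct m2m_like *m2m)
{
	guard(mutex)(&m2m->lock);	/* unlocked on every return below */

	if (m2m->usage_count == INT_MAX)
		return -EOVERFLOW;	/* no mutex_unlock() needed */

	m2m->usage_count++;
	return 0;
}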
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index dbcf17fb3ef2..f31a91ec7a6d 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -812,6 +812,9 @@ static int m_can_handle_state_change(struct net_device *dev,
u32 timestamp = 0;
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
@@ -841,6 +844,12 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
@@ -877,30 +886,33 @@ static int m_can_handle_state_change(struct net_device *dev,
return 1;
}
-static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+static enum can_state
+m_can_state_get_by_psr(struct m_can_classdev *cdev)
{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done = 0;
+ u32 reg_psr;
- if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
- netdev_dbg(dev, "entered error warning state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_WARNING);
- }
+ reg_psr = m_can_read(cdev, M_CAN_PSR);
- if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
- netdev_dbg(dev, "entered error passive state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_PASSIVE);
- }
+ if (reg_psr & PSR_BO)
+ return CAN_STATE_BUS_OFF;
+ if (reg_psr & PSR_EP)
+ return CAN_STATE_ERROR_PASSIVE;
+ if (reg_psr & PSR_EW)
+ return CAN_STATE_ERROR_WARNING;
- if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "entered error bus off state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_BUS_OFF);
- }
+ return CAN_STATE_ERROR_ACTIVE;
+}
- return work_done;
+static int m_can_handle_state_errors(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ enum can_state new_state;
+
+ new_state = m_can_state_get_by_psr(cdev);
+ if (new_state == cdev->can.state)
+ return 0;
+
+ return m_can_handle_state_change(dev, new_state);
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
@@ -1031,8 +1043,7 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
}
if (irqstatus & IR_ERR_STATE)
- work_done += m_can_handle_state_errors(dev,
- m_can_read(cdev, M_CAN_PSR));
+ work_done += m_can_handle_state_errors(dev);
if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus,
@@ -1606,7 +1617,7 @@ static int m_can_start(struct net_device *dev)
netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
cdev->tx_max_coalesced_frames);
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = m_can_state_get_by_psr(cdev);
m_can_enable_all_interrupts(cdev);
@@ -1785,6 +1796,13 @@ static void m_can_stop(struct net_device *dev)
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
+
+ if (cdev->ops->deinit) {
+ ret = cdev->ops->deinit(cdev);
+ if (ret)
+ netdev_err(dev, "failed to deinitialize: %pe\n",
+ ERR_PTR(ret));
+ }
}
static int m_can_close(struct net_device *dev)
@@ -2467,6 +2485,7 @@ int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
@@ -2479,18 +2498,20 @@ int m_can_class_suspend(struct device *dev)
if (cdev->pm_wake_source) {
hrtimer_cancel(&cdev->hrtimer);
m_can_write(cdev, M_CAN_IE, IR_RF0N);
+
+ if (cdev->ops->deinit)
+ ret = cdev->ops->deinit(cdev);
} else {
m_can_stop(ndev);
}
m_can_clk_stop(cdev);
+ cdev->can.state = CAN_STATE_SLEEPING;
}
pinctrl_pm_select_sleep_state(dev);
- cdev->can.state = CAN_STATE_SLEEPING;
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
@@ -2498,14 +2519,11 @@ int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
pinctrl_pm_select_default_state(dev);
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
-
if (netif_running(ndev)) {
- int ret;
-
ret = m_can_clk_start(cdev);
if (ret)
return ret;
@@ -2518,6 +2536,12 @@ int m_can_class_resume(struct device *dev)
* again.
*/
cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+
+ if (cdev->ops->init)
+ ret = cdev->ops->init(cdev);
+
+ cdev->can.state = m_can_state_get_by_psr(cdev);
+
m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
} else {
ret = m_can_start(ndev);
@@ -2531,7 +2555,7 @@ int m_can_class_resume(struct device *dev)
netif_start_queue(ndev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index ef39e8e527ab..bd4746c63af3 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -68,6 +68,7 @@ struct m_can_ops {
int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
const void *val, size_t val_count);
int (*init)(struct m_can_classdev *cdev);
+ int (*deinit)(struct m_can_classdev *cdev);
};
struct m_can_tx_op {
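
With deinit being optional, callers are expected to test the pointer before dispatching, as m_can_stop() and the suspend path above do. A compact sketch of that dispatch (the wrapper is illustrative, not from the patch):

static int m_can_ops_deinit(struct m_can_classdev *cdev)
{
	/* A missing hook is not an error; treat it as a no-op. */
	if (!cdev->ops->deinit)
		return 0;

	return cdev->ops->deinit(cdev);
}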
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index b832566efda0..057eaa7b8b4b 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -180,7 +180,7 @@ static void m_can_plat_remove(struct platform_device *pdev)
struct m_can_classdev *mcan_class = &priv->cdev;
m_can_class_unregister(mcan_class);
-
+ pm_runtime_disable(mcan_class->dev);
m_can_class_free_dev(mcan_class->net);
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b6f4de375df7..fb904dc28b8e 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -286,11 +286,6 @@ struct gs_host_frame {
#define GS_MAX_RX_URBS 30
#define GS_NAPI_WEIGHT 32
-/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 3 interfaces. The future may vary.
- */
-#define GS_MAX_INTF 3
-
struct gs_tx_context {
struct gs_can *dev;
unsigned int echo_id;
@@ -321,7 +316,6 @@ struct gs_can {
/* usb interface struct */
struct gs_usb {
- struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
struct usb_device *udev;
@@ -333,9 +327,11 @@ struct gs_usb {
unsigned int hf_size_rx;
u8 active_channels;
+ u8 channel_cnt;
unsigned int pipe_in;
unsigned int pipe_out;
+ struct gs_can *canch[] __counted_by(channel_cnt);
};
/* 'allocate' a tx context.
@@ -596,7 +592,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
/* device reports out of range channel id */
- if (hf->channel >= GS_MAX_INTF)
+ if (hf->channel >= parent->channel_cnt)
goto device_detach;
dev = parent->canch[hf->channel];
@@ -696,7 +692,7 @@ resubmit_urb:
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
device_detach:
- for (rc = 0; rc < GS_MAX_INTF; rc++) {
+ for (rc = 0; rc < parent->channel_cnt; rc++) {
if (parent->canch[rc])
netif_device_detach(parent->canch[rc]->netdev);
}
@@ -1246,6 +1242,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
netdev->dev_id = channel;
+ netdev->dev_port = channel;
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
@@ -1457,17 +1454,19 @@ static int gs_usb_probe(struct usb_interface *intf,
icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
- if (icount > GS_MAX_INTF) {
+ if (icount > type_max(parent->channel_cnt)) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
- GS_MAX_INTF);
+ type_max(parent->channel_cnt));
return -EINVAL;
}
- parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+ parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
if (!parent)
return -ENOMEM;
+ parent->channel_cnt = icount;
+
init_usb_anchor(&parent->rx_submitted);
usb_set_intfdata(intf, parent);
@@ -1528,7 +1527,7 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
- for (i = 0; i < GS_MAX_INTF; i++)
+ for (i = 0; i < parent->channel_cnt; i++)
if (parent->canch[i])
gs_destroy_candev(parent->canch[i]);
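
The gs_usb conversion is the standard flexible-array pattern: the array moves to the end of the struct, __counted_by() ties its length to channel_cnt for fortified bounds checking, and struct_size() computes the allocation without overflow. A self-contained sketch with illustrative names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct gs_can;	/* per-channel state, opaque here */

struct chan_table {
	u8 channel_cnt;
	struct gs_can *canch[] __counted_by(channel_cnt);
};

static struct chan_table *chan_table_alloc(u8 icount)
{
	struct chan_table *t;

	t = kzalloc(struct_size(t, canch, icount), GFP_KERNEL);
	if (t)
		t->channel_cnt = icount;	/* must match the allocation */

	return t;
}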
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 8bc49259d71a..32a6d5261424 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1172,7 +1172,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
- pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
return pdata->phy_if.phy_reset(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index ed76a8df6ec6..75e9cb3fc7aa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1664,6 +1664,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
}
+ pdata->phy_link = 0;
pdata->phy.link = 0;
pdata->phy.pause_autoneg = pdata->pause_autoneg;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 717e110d23c9..dc170feee8ad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5803,7 +5803,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
- u32 local_adv, remote_adv, sgsr;
+ u32 local_adv = 0, remote_adv = 0, sgsr;
if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720) &&
@@ -5944,9 +5944,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
else
current_duplex = DUPLEX_HALF;
- local_adv = 0;
- remote_adv = 0;
-
if (bmcr & BMCR_ANENABLE) {
u32 common;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 92856cf387c7..7c9658a4ec4b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -498,25 +498,34 @@ static int alloc_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
+ dma_addr_t addr;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
- if (!skb) {
- free_list(dev);
- return -ENOMEM;
- }
+ if (!skb)
+ goto err_free_list;
+
+ addr = dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pdev->dev, addr))
+ goto err_kfree_skb;
np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
- np->rx_ring[i].fraginfo =
- cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
+ np->rx_ring[i].fraginfo = cpu_to_le64(addr);
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
return 0;
+
+err_kfree_skb:
+ dev_kfree_skb(np->rx_skbuff[i]);
+ np->rx_skbuff[i] = NULL;
+err_free_list:
+ free_list(dev);
+ return -ENOMEM;
}
static void rio_hw_init(struct net_device *dev)
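
The dl2k fix applies the usual DMA discipline: every dma_map_single() result is vetted with dma_mapping_error() before being handed to hardware, and a failed mapping unwinds the buffer it was meant to cover. A minimal sketch (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static int map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
			 unsigned int len, dma_addr_t *addr)
{
	*addr = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *addr)) {
		dev_kfree_skb(skb);	/* caller must forget this skb */
		return -ENOMEM;
	}

	return 0;
}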
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5f08779c0e4e..e177d1d58696 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
@@ -16,6 +16,9 @@
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -25,6 +28,7 @@
/* Link speed */
typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_100_FULL 0x0008
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index f804b35d79c7..83b3243f172c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -271,6 +271,9 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return -EOPNOTSUPP;
+
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
return -EINVAL;
@@ -400,6 +403,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return;
+
if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
@@ -628,6 +634,10 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
size_t size;
switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_17:
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return;
+ break;
case ixgbe_mbox_api_14:
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 130cb868774c..f31068e24e86 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -366,6 +366,13 @@ struct ixgbevf_adapter {
/* Interrupt Throttle Rate */
u32 eitr_param;
+ u32 pf_features;
+#define IXGBEVF_PF_SUP_IPSEC BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
+
+#define IXGBEVF_SUPPORTED_FEATURES (IXGBEVF_PF_SUP_IPSEC | \
+ IXGBEVF_PF_SUP_ESX_MBX)
+
struct ixgbevf_hw_stats stats;
unsigned long state;
@@ -418,6 +425,8 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
board_x550em_a_vf,
+ board_e610_vf,
+ board_e610_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -434,11 +443,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
+extern const struct ixgbevf_info ixgbevf_e610_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
/* needed by ethtool.c */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 149911e3002a..a2a7cb6d8ea1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2018 Intel Corporation.";
+ "Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
+ [board_e610_vf] = &ixgbevf_e610_vf_info,
+ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
+ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
+ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
@@ -2269,10 +2274,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
+/**
+ * ixgbevf_set_features - Set features supported by PF
+ * @adapter: pointer to the adapter struct
+ *
+ * Negotiate with PF supported features and then set pf_features accordingly.
+ */
+static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
+{
+ u32 *pf_features = &adapter->pf_features;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = hw->mac.ops.negotiate_features(hw, pf_features);
+ if (err && err != -EOPNOTSUPP)
+ netdev_dbg(adapter->netdev,
+ "PF feature negotiation failed.\n");
+
+ /* Also cover mailbox API versions earlier than 1.7 */
+ if (hw->api_version == ixgbe_mbox_api_14)
+ *pf_features |= IXGBEVF_PF_SUP_IPSEC;
+ else if (hw->api_version == ixgbe_mbox_api_15)
+ *pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
+}
+
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
+ ixgbe_mbox_api_17,
+ ixgbe_mbox_api_16,
ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
@@ -2292,7 +2323,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
- if (hw->api_version >= ixgbe_mbox_api_15) {
+ ixgbevf_set_features(adapter);
+
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
hw->mbx.ops.init_params(hw);
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
@@ -2649,6 +2682,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -4643,6 +4678,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4693,6 +4730,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
+ case ixgbe_mac_e610_vf:
+ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
+ break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 835bbcc5cc8e..a8ed23ee66aa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
+ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
+ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 1641d00d8ed3..65257107dfc8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
@@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -625,6 +631,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
}
/**
+ * ixgbevf_get_pf_link_state - Get PF's link status
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Ask PF to provide link_up state and speed of the link.
+ *
+ * Return: IXGBE_ERR_MBX in the case of mailbox error,
+ * -EOPNOTSUPP if the op is not supported or 0 on success.
+ */
+static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 msgbuf[3] = {};
+ int err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ err = IXGBE_ERR_MBX;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* No need to set @link_up to false as it will be done by
+ * ixgbevf_check_mac_link_vf().
+ */
+ } else {
+ *speed = msgbuf[1];
+ *link_up = msgbuf[2];
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
+ * @hw: pointer to the HW structure
+ * @pf_features: bitmask of features supported by PF
+ *
+ * Return: IXGBE_ERR_MBX in the case of mailbox error,
+ * -EOPNOTSUPP if the op is not supported or 0 on success.
+ */
+static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
+{
+ u32 msgbuf[2] = {};
+ int err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
+ msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ err = IXGBE_ERR_MBX;
+ *pf_features = 0x0;
+ } else {
+ *pf_features = msgbuf[1];
+ }
+
+ return err;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -659,6 +744,58 @@ mbx_err:
}
/**
+ * ixgbe_read_vflinks - Read VFLINKS register
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Get linkup status and link speed from the VFLINKS register.
+ */
+static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ /* if link status is down no point in checking to see if PF is up */
+ if (!(vflinks & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ return;
+ }
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf) {
+ for (int i = 0; i < 5; i++) {
+ udelay(100);
+ vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(vflinks & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ return;
+ }
+ }
+ }
+
+ /* We reached this point so there's link */
+ *link_up = true;
+
+ switch (vflinks & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+}
+
+/**
* ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
* @hw: unused
* @vlan: unused
@@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
bool *link_up,
bool autoneg_wait_to_complete)
{
+ struct ixgbevf_adapter *adapter = hw->back;
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
s32 ret_val = 0;
- u32 links_reg;
u32 in_msg = 0;
/* If we were hit with a reset drop the link */
@@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
if (!mac->get_link_status)
goto out;
- /* if link status is down no point in checking to see if pf is up */
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
- if (!(links_reg & IXGBE_LINKS_UP))
- goto out;
-
- /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
- * before the link status is correct
- */
- if (mac->type == ixgbe_mac_82599_vf) {
- int i;
-
- for (i = 0; i < 5; i++) {
- udelay(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
-
- if (!(links_reg & IXGBE_LINKS_UP))
- goto out;
- }
- }
-
- switch (links_reg & IXGBE_LINKS_SPEED_82599) {
- case IXGBE_LINKS_SPEED_10G_82599:
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- break;
- case IXGBE_LINKS_SPEED_1G_82599:
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- break;
- case IXGBE_LINKS_SPEED_100_82599:
- *speed = IXGBE_LINK_SPEED_100_FULL;
- break;
+ if (hw->mac.type == ixgbe_mac_e610_vf) {
+ ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
+ if (ret_val)
+ goto out;
+ } else {
+ ixgbe_read_vflinks(hw, speed, link_up);
+ if (!*link_up)
+ goto out;
}
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
if (mbx->ops.read(hw, &in_msg, 1)) {
- if (hw->api_version >= ixgbe_mbox_api_15)
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
mac->get_link_status = false;
goto out;
}
@@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return 0;
@@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
+ .negotiate_features = ixgbevf_negotiate_features_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
@@ -1076,3 +1194,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
.mac = ixgbe_mac_x550em_a_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_e610_vf_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index b4eef5b6c172..4f19b8900c29 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
@@ -26,6 +26,7 @@ struct ixgbe_mac_operations {
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
+ int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -54,6 +55,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_x550em_a_vf,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 7b82779e4cd5..80b5262d0d57 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5060,8 +5060,9 @@ static int rtl8169_resume(struct device *device)
if (!device_may_wakeup(tp_to_dev(tp)))
clk_prepare_enable(tp->clk);
- /* Reportedly at least Asus X453MA truncates packets otherwise */
- if (tp->mac_version == RTL_GIGA_MAC_VER_37)
+ /* Some chip versions may truncate packets without this initialization */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_46)
rtl_init_rxcfg(tp);
return rtl8169_runtime_resume(device);
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index ee2a7b2f6268..e2d92295ad33 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -433,6 +433,7 @@ static void nsim_enable_napi(struct netdevsim *ns)
static int nsim_open(struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct netdevsim *peer;
int err;
err = nsim_init_napi(ns);
@@ -441,6 +442,12 @@ static int nsim_open(struct net_device *dev)
nsim_enable_napi(ns);
+ peer = rtnl_dereference(ns->peer);
+ if (peer && netif_running(peer->netdev)) {
+ netif_carrier_on(dev);
+ netif_carrier_on(peer->netdev);
+ }
+
return 0;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 2f8637224b69..2da67814556f 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1920,13 +1920,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
-static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
u8 addr[6];
+ int ret;
+
+ ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ if (ret < 0)
+ return ret;
- lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ if (ret < 0)
+ return ret;
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1959,14 +1965,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
}
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ if (ret < 0)
+ return ret;
eth_hw_addr_set(dev->net, addr);
+
+ return 0;
}
/* MDIO read and write wrappers for phylib */
@@ -2905,8 +2923,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
} while (buf & HW_CFG_LRST_);
- lan78xx_init_mac_address(dev);
-
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
if (ret < 0)
@@ -2915,6 +2931,10 @@ static int lan78xx_reset(struct lan78xx_net *dev)
dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
+ ret = lan78xx_init_mac_address(dev);
+ if (ret < 0)
+ return ret;
+
/* Respond to the IN token with a NAK */
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (ret < 0)
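
The lan78xx change follows a common conversion: a void init helper built on fallible register accessors becomes int, and each accessor's negative errno is forwarded instead of ignored. Compressed to its shape (register names from the patch, body abridged):

static int mac_init_step(struct lan78xx_net *dev)
{
	u32 addr_lo;
	int ret;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	if (ret < 0)			/* forward, don't swallow */
		return ret;

	return lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
}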
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2cab046749a9..3fcd2b736c5e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10151,7 +10151,12 @@ static int __init rtl8152_driver_init(void)
ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
if (ret)
return ret;
- return usb_register(&rtl8152_driver);
+
+ ret = usb_register(&rtl8152_driver);
+ if (ret)
+ usb_deregister_device_driver(&rtl8152_cfgselector_driver);
+
+ return ret;
}
static void __exit rtl8152_driver_exit(void)
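
The rtl8152 fix is the standard two-step module-init unwind: when the second registration fails, the first is rolled back before the error is returned. As a generic sketch (both driver objects are placeholders):

#include <linux/module.h>
#include <linux/usb.h>

static struct usb_device_driver first_driver;	/* placeholder */
static struct usb_driver second_driver;		/* placeholder */

static int __init two_step_init(void)
{
	int ret;

	ret = usb_register_device_driver(&first_driver, THIS_MODULE);
	if (ret)
		return ret;

	ret = usb_register(&second_driver);
	if (ret)
		usb_deregister_device_driver(&first_driver); /* unwind step 1 */

	return ret;
}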
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 99711a4fb85d..df8fad7a2aea 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -978,25 +978,14 @@ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
}
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
- int qsel, unsigned int timeout)
+ struct rtw89_tx_wait_info *wait, int qsel,
+ unsigned int timeout)
{
- struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
- struct rtw89_tx_wait_info *wait;
unsigned long time_left;
int ret = 0;
lockdep_assert_wiphy(rtwdev->hw->wiphy);
- wait = kzalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- rtw89_core_tx_kick_off(rtwdev, qsel);
- return 0;
- }
-
- init_completion(&wait->completion);
- wait->skb = skb;
- rcu_assign_pointer(skb_data->wait, wait);
-
rtw89_core_tx_kick_off(rtwdev, qsel);
time_left = wait_for_completion_timeout(&wait->completion,
msecs_to_jiffies(timeout));
@@ -1057,10 +1046,12 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
}
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
+ struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel,
+ struct rtw89_tx_wait_info *wait)
{
struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct rtw89_core_tx_request tx_req = {0};
struct rtw89_sta_link *rtwsta_link = NULL;
struct rtw89_vif_link *rtwvif_link;
@@ -1093,6 +1084,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
rtw89_core_tx_wake(rtwdev, &tx_req);
+ rcu_assign_pointer(skb_data->wait, wait);
+
ret = rtw89_hci_tx_write(rtwdev, &tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -2908,7 +2901,7 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
goto out;
}
rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
- ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
+ ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL, NULL);
if (ret) {
rtw89_err(rtwdev, "failed to push txq: %d\n", ret);
ieee80211_free_txskb(rtwdev->hw, skb);
@@ -3084,7 +3077,7 @@ bottom:
skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) {
skb_unlink(skb, &rtwsta->roc_queue);
- ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
+ ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, NULL);
if (ret) {
rtw89_warn(rtwdev, "pending tx failed with %d\n", ret);
dev_kfree_skb_any(skb);
@@ -3106,6 +3099,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool qos, bool ps)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ struct rtw89_tx_wait_info *wait;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
@@ -3114,6 +3108,12 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return 0;
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait)
+ return -ENOMEM;
+
+ init_completion(&wait->completion);
+
rcu_read_lock();
sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!sta) {
@@ -3127,11 +3127,13 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
goto out;
}
+ wait->skb = skb;
+
hdr = (struct ieee80211_hdr *)skb->data;
if (ps)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
- ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
+ ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, wait);
if (ret) {
rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
dev_kfree_skb_any(skb);
@@ -3140,10 +3142,11 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
rcu_read_unlock();
- return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
+ return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel,
RTW89_ROC_TX_TIMEOUT);
out:
rcu_read_unlock();
+ kfree(wait);
return ret;
}
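
The rtw89 refactor moves allocation of the wait object to the caller so every error path can free it, instead of leaking it when the TX write fails. A hedged sketch of the completion-based wait it is built on (struct trimmed to the relevant fields):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct tx_wait {
	struct completion completion;
	struct sk_buff *skb;
};

static struct tx_wait *tx_wait_alloc(struct sk_buff *skb)
{
	struct tx_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);

	if (wait) {
		init_completion(&wait->completion);
		wait->skb = skb;
	}

	return wait;
}

/* The TX-done path calls complete(&wait->completion). */
static int tx_kick_off_and_wait(struct tx_wait *wait, unsigned int timeout_ms)
{
	if (!wait_for_completion_timeout(&wait->completion,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	return 0;
}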
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index cb703588e3a4..7be32b83d4d6 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -6818,12 +6818,14 @@ static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
}
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
+ struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel,
+ struct rtw89_tx_wait_info *wait);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
struct sk_buff *skb, bool fwdl);
void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
- int qsel, unsigned int timeout);
+ struct rtw89_tx_wait_info *wait, int qsel,
+ unsigned int timeout);
void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 3a1a2b243adf..d1f9bd41f8b5 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -36,7 +36,7 @@ static void rtw89_ops_tx(struct ieee80211_hw *hw,
return;
}
- ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
+ ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, NULL);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb: %d\n", ret);
ieee80211_free_txskb(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 5fd5fe88e6b0..a87e1778a0d4 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -1366,7 +1366,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
- struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
bool en_wd_info = desc_info->en_wd_info;
u32 txwd_len;
u32 txwp_len;
@@ -1382,7 +1381,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
}
tx_data->dma = dma;
- rcu_assign_pointer(skb_data->wait, NULL);
txwp_len = sizeof(*txwp_info);
txwd_len = chip->txwd_body_size;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 561dd08022c0..24cff8b04492 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -131,12 +131,14 @@ void nvme_mpath_start_request(struct request *rq)
struct nvme_ns *ns = rq->q->queuedata;
struct gendisk *disk = ns->head->disk;
- if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+ if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) &&
+ !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
atomic_inc(&ns->ctrl->nr_active);
nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
}
- if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
+ if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
+ (nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
return;
nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 83a6b18b01ad..77df3432dfb7 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1075,6 +1075,9 @@ static void nvme_tcp_write_space(struct sock *sk)
queue = sk->sk_user_data;
if (likely(queue && sk_stream_is_writeable(sk))) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ /* Ensure pending TLS partial records are retried */
+ if (nvme_tcp_queue_tls(queue))
+ queue->write_space(sk);
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
read_unlock_bh(&sk->sk_callback_lock);
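
The TLS retry works because nvme-tcp saves the socket's original write_space callback when it takes the socket over; calling the saved pointer first gives the TLS layer a chance to flush partial records. A sketch of that callback-chaining setup, abridged from the driver's socket takeover path:

static void install_write_space(struct nvme_tcp_queue *queue, struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = queue;
	queue->write_space = sk->sk_write_space;   /* remember TLS/TCP hook */
	sk->sk_write_space = nvme_tcp_write_space; /* ours runs first */
	write_unlock_bh(&sk->sk_callback_lock);
}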
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
index dddb66de6dba..8d93a830ab8b 100644
--- a/drivers/phy/cadence/cdns-dphy.c
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -30,6 +30,7 @@
#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_SSM_EN BIT(0)
+#define DPHY_CMN_SSM_CAL_WAIT_TIME GENMASK(8, 1)
#define DPHY_CMN_TX_MODE_EN BIT(9)
#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
@@ -79,6 +80,7 @@ struct cdns_dphy_cfg {
u8 pll_ipdiv;
u8 pll_opdiv;
u16 pll_fbdiv;
+ u32 hs_clk_rate;
unsigned int nlanes;
};
@@ -99,6 +101,8 @@ struct cdns_dphy_ops {
void (*set_pll_cfg)(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg);
unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
+ int (*wait_for_pll_lock)(struct cdns_dphy *dphy);
+ int (*wait_for_cmn_ready)(struct cdns_dphy *dphy);
};
struct cdns_dphy {
@@ -108,6 +112,8 @@ struct cdns_dphy {
struct clk *pll_ref_clk;
const struct cdns_dphy_ops *ops;
struct phy *phy;
+ bool is_configured;
+ bool is_powered;
};
/* Order of bands is important since the index is the band number. */
@@ -154,6 +160,9 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
cfg->pll_ipdiv,
pll_ref_hz);
+ cfg->hs_clk_rate = div_u64((u64)pll_ref_hz * cfg->pll_fbdiv,
+ 2 * cfg->pll_opdiv * cfg->pll_ipdiv);
+
return 0;
}
@@ -191,6 +200,16 @@ static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
return dphy->ops->get_wakeup_time_ns(dphy);
}
+static int cdns_dphy_wait_for_pll_lock(struct cdns_dphy *dphy)
+{
+ return dphy->ops->wait_for_pll_lock ? dphy->ops->wait_for_pll_lock(dphy) : 0;
+}
+
+static int cdns_dphy_wait_for_cmn_ready(struct cdns_dphy *dphy)
+{
+ return dphy->ops->wait_for_cmn_ready ? dphy->ops->wait_for_cmn_ready(dphy) : 0;
+}
+
static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
/* Default wakeup time is 800 ns (in a simulated environment). */
@@ -232,7 +251,6 @@ static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
- u32 status;
/*
* set the PWM and PLL Byteclk divider settings to recommended values
@@ -249,13 +267,6 @@ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
-
- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
- (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
-
- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
- (status & DPHY_TX_WIZ_O_CMN_READY), 0,
- POLL_TIMEOUT_US);
}
static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
@@ -263,6 +274,23 @@ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
}
+static int cdns_dphy_j721e_wait_for_pll_lock(struct cdns_dphy *dphy)
+{
+ u32 status;
+
+ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
+ status & DPHY_TX_WIZ_PLL_LOCK, 0, POLL_TIMEOUT_US);
+}
+
+static int cdns_dphy_j721e_wait_for_cmn_ready(struct cdns_dphy *dphy)
+{
+ u32 status;
+
+ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
+ status & DPHY_TX_WIZ_O_CMN_READY, 0,
+ POLL_TIMEOUT_US);
+}
+
/*
* This is the reference implementation of DPHY hooks. Specific integration of
* this IP may have to re-implement some of them depending on how they decided
@@ -278,6 +306,8 @@ static const struct cdns_dphy_ops j721e_dphy_ops = {
.get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
.set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
.set_psm_div = cdns_dphy_j721e_set_psm_div,
+ .wait_for_pll_lock = cdns_dphy_j721e_wait_for_pll_lock,
+ .wait_for_cmn_ready = cdns_dphy_j721e_wait_for_cmn_ready,
};
static int cdns_dphy_config_from_opts(struct phy *phy,
@@ -297,6 +327,7 @@ static int cdns_dphy_config_from_opts(struct phy *phy,
if (ret)
return ret;
+ opts->hs_clk_rate = cfg->hs_clk_rate;
opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
return 0;
@@ -334,21 +365,36 @@ static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
- struct cdns_dphy_cfg cfg = { 0 };
- int ret, band_ctrl;
- unsigned int reg;
+ int ret;
- ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
- if (ret)
- return ret;
+ ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &dphy->cfg);
+ if (!ret)
+ dphy->is_configured = true;
+
+ return ret;
+}
+
+static int cdns_dphy_power_on(struct phy *phy)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ int ret;
+ u32 reg;
+
+ if (!dphy->is_configured || dphy->is_powered)
+ return -EINVAL;
+
+ clk_prepare_enable(dphy->psm_clk);
+ clk_prepare_enable(dphy->pll_ref_clk);
/*
* Configure the internal PSM clk divider so that the DPHY has a
* 1MHz clk (or something close).
*/
ret = cdns_dphy_setup_psm(dphy);
- if (ret)
- return ret;
+ if (ret) {
+ dev_err(&dphy->phy->dev, "Failed to setup PSM with error %d\n", ret);
+ goto err_power_on;
+ }
/*
* Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
@@ -363,40 +409,61 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
* Configure the DPHY PLL that will be used to generate the TX byte
* clk.
*/
- cdns_dphy_set_pll_cfg(dphy, &cfg);
+ cdns_dphy_set_pll_cfg(dphy, &dphy->cfg);
- band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
- if (band_ctrl < 0)
- return band_ctrl;
+ ret = cdns_dphy_tx_get_band_ctrl(dphy->cfg.hs_clk_rate);
+ if (ret < 0) {
+ dev_err(&dphy->phy->dev, "Failed to get band control value with error %d\n", ret);
+ goto err_power_on;
+ }
- reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
- FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
+ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, ret) |
+ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, ret);
writel(reg, dphy->regs + DPHY_BAND_CFG);
- return 0;
-}
+ /* Start TX state machine. */
+ reg = readl(dphy->regs + DPHY_CMN_SSM);
+ writel((reg & DPHY_CMN_SSM_CAL_WAIT_TIME) | DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+ dphy->regs + DPHY_CMN_SSM);
-static int cdns_dphy_power_on(struct phy *phy)
-{
- struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ ret = cdns_dphy_wait_for_pll_lock(dphy);
+ if (ret) {
+ dev_err(&dphy->phy->dev, "Failed to lock PLL with error %d\n", ret);
+ goto err_power_on;
+ }
- clk_prepare_enable(dphy->psm_clk);
- clk_prepare_enable(dphy->pll_ref_clk);
+ ret = cdns_dphy_wait_for_cmn_ready(dphy);
+ if (ret) {
+ dev_err(&dphy->phy->dev, "O_CMN_READY signal failed to assert with error %d\n",
+ ret);
+ goto err_power_on;
+ }
- /* Start TX state machine. */
- writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
- dphy->regs + DPHY_CMN_SSM);
+ dphy->is_powered = true;
return 0;
+
+err_power_on:
+ clk_disable_unprepare(dphy->pll_ref_clk);
+ clk_disable_unprepare(dphy->psm_clk);
+
+ return ret;
}
static int cdns_dphy_power_off(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ u32 reg;
clk_disable_unprepare(dphy->pll_ref_clk);
clk_disable_unprepare(dphy->psm_clk);
+ /* Stop TX state machine. */
+ reg = readl(dphy->regs + DPHY_CMN_SSM);
+ writel(reg & ~DPHY_CMN_SSM_EN, dphy->regs + DPHY_CMN_SSM);
+
+ dphy->is_powered = false;
+
return 0;
}
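
Starting the TX state machine is now a read-modify-write so the DPHY_CMN_SSM_CAL_WAIT_TIME field programmed earlier survives; only the enable bits are asserted. The masking idiom in isolation (register offset and masks as defined in this driver):

#include <linux/io.h>

static void start_tx_state_machine(void __iomem *regs)
{
	u32 reg = readl(regs + DPHY_CMN_SSM);

	/* Keep CAL_WAIT_TIME, set only the enable bits. */
	reg &= DPHY_CMN_SSM_CAL_WAIT_TIME;
	reg |= DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN;
	writel(reg, regs + DPHY_CMN_SSM);
}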
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 7061720b9732..106046e17c4e 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -11,12 +11,15 @@
/* #define VERBOSE_DEBUG */
+#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/usb/gadget.h>
+
#include "u_serial.h"
@@ -613,6 +616,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_string *us;
int status;
struct usb_ep *ep;
+ struct usb_request *request __free(free_usb_request) = NULL;
/* REVISIT might want instance-specific strings to help
* distinguish instances ...
@@ -630,7 +634,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
- goto fail;
+ return status;
acm->ctrl_id = status;
acm_iad_descriptor.bFirstInterface = status;
@@ -639,43 +643,41 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_interface_id(c, f);
if (status < 0)
- goto fail;
+ return status;
acm->data_id = status;
acm_data_interface_desc.bInterfaceNumber = status;
acm_union_desc.bSlaveInterface0 = status;
acm_call_mgmt_descriptor.bDataInterface = status;
- status = -ENODEV;
-
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
acm->port.in = ep;
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
acm->port.out = ep;
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
acm->notify = ep;
acm_iad_descriptor.bFunctionProtocol = acm->bInterfaceProtocol;
acm_control_interface_desc.bInterfaceProtocol = acm->bInterfaceProtocol;
/* allocate notification */
- acm->notify_req = gs_alloc_req(ep,
- sizeof(struct usb_cdc_notification) + 2,
- GFP_KERNEL);
- if (!acm->notify_req)
- goto fail;
+ request = gs_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (!request)
+ return -ENODEV;
- acm->notify_req->complete = acm_cdc_notify_complete;
- acm->notify_req->context = acm;
+ request->complete = acm_cdc_notify_complete;
+ request->context = acm;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -692,7 +694,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
acm_ss_function, acm_ss_function);
if (status)
- goto fail;
+ return status;
+
+ acm->notify_req = no_free_ptr(request);
dev_dbg(&cdev->gadget->dev,
"acm ttyGS%d: IN/%s OUT/%s NOTIFY/%s\n",
@@ -700,14 +704,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm->port.in->name, acm->port.out->name,
acm->notify->name);
return 0;
-
-fail:
- if (acm->notify_req)
- gs_free_req(acm->notify, acm->notify_req);
-
- ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
-
- return status;
}
static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
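
[The acm_bind() conversion relies on the scoped-cleanup helpers from <linux/cleanup.h>: a local declared with __free() is released automatically on every early return, and no_free_ptr() disarms the cleanup when ownership is handed over on success. A minimal sketch of the pattern with a hypothetical demo_buf type; DEFINE_FREE(), __free() and no_free_ptr() are the real cleanup.h API:]

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_buf {
		char data[64];
	};

	DEFINE_FREE(demo_free, struct demo_buf *, if (_T) kfree(_T))

	static struct demo_buf *demo_alloc_checked(bool fail_late)
	{
		struct demo_buf *buf __free(demo_free) =
			kmalloc(sizeof(*buf), GFP_KERNEL);

		if (!buf)
			return NULL;

		if (fail_late)
			return NULL;	/* demo_free() runs here automatically */

		return no_free_ptr(buf);	/* success: caller now owns buf */
	}

[This is why the error paths in acm_bind() can now simply return: the only resource still held locally is the notification request, and the __free() annotation covers it.]
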
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 549efc84dd83..123c03a08509 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -8,12 +8,15 @@
/* #define VERBOSE_DEBUG */
+#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
+#include <linux/usb/gadget.h>
+
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_ecm.h"
@@ -678,6 +681,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
struct f_ecm_opts *ecm_opts;
+ struct usb_request *request __free(free_usb_request) = NULL;
if (!can_support_ecm(cdev->gadget))
return -EINVAL;
@@ -711,7 +715,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
- goto fail;
+ return status;
ecm->ctrl_id = status;
ecm_iad_descriptor.bFirstInterface = status;
@@ -720,24 +724,22 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_interface_id(c, f);
if (status < 0)
- goto fail;
+ return status;
ecm->data_id = status;
ecm_data_nop_intf.bInterfaceNumber = status;
ecm_data_intf.bInterfaceNumber = status;
ecm_union_desc.bSlaveInterface0 = status;
- status = -ENODEV;
-
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
ecm->port.in_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
ecm->port.out_ep = ep;
/* NOTE: a status/notification endpoint is *OPTIONAL* but we
@@ -746,20 +748,18 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
*/
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
ecm->notify = ep;
- status = -ENOMEM;
-
/* allocate notification request and buffer */
- ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
- if (!ecm->notify_req)
- goto fail;
- ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
- if (!ecm->notify_req->buf)
- goto fail;
- ecm->notify_req->context = ecm;
- ecm->notify_req->complete = ecm_notify_complete;
+ request = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+ request->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
+ if (!request->buf)
+ return -ENOMEM;
+ request->context = ecm;
+ request->complete = ecm_notify_complete;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -778,7 +778,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
ecm_ss_function, ecm_ss_function);
if (status)
- goto fail;
+ return status;
/* NOTE: all that is done without knowing or caring about
* the network link ... which is unavailable to this code
@@ -788,20 +788,12 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
ecm->port.open = ecm_open;
ecm->port.close = ecm_close;
+ ecm->notify_req = no_free_ptr(request);
+
DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n",
ecm->port.in_ep->name, ecm->port.out_ep->name,
ecm->notify->name);
return 0;
-
-fail:
- if (ecm->notify_req) {
- kfree(ecm->notify_req->buf);
- usb_ep_free_request(ecm->notify, ecm->notify_req);
- }
-
- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
-
- return status;
}
static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item)
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 8e761249d672..3afc9a622086 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -11,6 +11,7 @@
* Copyright (C) 2008 Nokia Corporation
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -19,6 +20,7 @@
#include <linux/crc32.h>
#include <linux/usb/cdc.h>
+#include <linux/usb/gadget.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
@@ -1435,18 +1437,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
struct f_ncm_opts *ncm_opts;
+ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
+ struct usb_request *request __free(free_usb_request) = NULL;
+
if (!can_support_ecm(cdev->gadget))
return -EINVAL;
ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
if (cdev->use_os_string) {
- f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
- GFP_KERNEL);
- if (!f->os_desc_table)
+ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
+ if (!os_desc_table)
return -ENOMEM;
- f->os_desc_n = 1;
- f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
}
mutex_lock(&ncm_opts->lock);
@@ -1458,16 +1460,15 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
mutex_unlock(&ncm_opts->lock);
if (status)
- goto fail;
+ return status;
ncm_opts->bound = true;
us = usb_gstrings_attach(cdev, ncm_strings,
ARRAY_SIZE(ncm_string_defs));
- if (IS_ERR(us)) {
- status = PTR_ERR(us);
- goto fail;
- }
+ if (IS_ERR(us))
+ return PTR_ERR(us);
+
ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
@@ -1477,20 +1478,16 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
- goto fail;
+ return status;
ncm->ctrl_id = status;
ncm_iad_desc.bFirstInterface = status;
ncm_control_intf.bInterfaceNumber = status;
ncm_union_desc.bMasterInterface0 = status;
- if (cdev->use_os_string)
- f->os_desc_table[0].if_id =
- ncm_iad_desc.bFirstInterface;
-
status = usb_interface_id(c, f);
if (status < 0)
- goto fail;
+ return status;
ncm->data_id = status;
ncm_data_nop_intf.bInterfaceNumber = status;
@@ -1499,35 +1496,31 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_opts->max_segment_size);
- status = -ENODEV;
-
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
ncm->port.in_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
ncm->port.out_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
ncm->notify = ep;
- status = -ENOMEM;
-
/* allocate notification request and buffer */
- ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
- if (!ncm->notify_req)
- goto fail;
- ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
- if (!ncm->notify_req->buf)
- goto fail;
- ncm->notify_req->context = ncm;
- ncm->notify_req->complete = ncm_notify_complete;
+ request = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+ request->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
+ if (!request->buf)
+ return -ENOMEM;
+ request->context = ncm;
+ request->complete = ncm_notify_complete;
/*
* support all relevant hardware speeds... we expect that when
@@ -1547,7 +1540,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
ncm_ss_function, ncm_ss_function);
if (status)
- goto fail;
+ return status;
/*
* NOTE: all that is done without knowing or caring about
@@ -1561,23 +1554,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ncm->task_timer.function = ncm_tx_timeout;
+ if (cdev->use_os_string) {
+ os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ os_desc_table[0].if_id = ncm_iad_desc.bFirstInterface;
+ f->os_desc_table = no_free_ptr(os_desc_table);
+ f->os_desc_n = 1;
+ }
+ ncm->notify_req = no_free_ptr(request);
+
DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
ncm->notify->name);
return 0;
-
-fail:
- kfree(f->os_desc_table);
- f->os_desc_n = 0;
-
- if (ncm->notify_req) {
- kfree(ncm->notify_req->buf);
- usb_ep_free_request(ncm->notify, ncm->notify_req);
- }
-
- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
-
- return status;
}
static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
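
[Beyond the request, ncm_bind() now defers publishing the OS descriptor table: it is allocated early under __free(kfree), but f->os_desc_table and f->os_desc_n are set only once nothing else can fail, so no error path leaves the function structure pointing at freed memory. A sketch of that deferred-publication shape, with illustrative demo_* names; the scoped kfree() cleanup is provided by <linux/slab.h>:]

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_table {
		int if_id;
	};

	struct demo_func {
		struct demo_table *os_desc_table;
		unsigned int os_desc_n;
	};

	static int demo_bind(struct demo_func *f, bool use_os_string, int if_id)
	{
		struct demo_table *table __free(kfree) = NULL;

		if (use_os_string) {
			table = kzalloc(sizeof(*table), GFP_KERNEL);
			if (!table)
				return -ENOMEM;
		}

		/*
		 * ... interface IDs, endpoints, descriptors: any failure here
		 * just returns, and the scoped kfree() reclaims the table ...
		 */

		if (use_os_string) {
			table->if_id = if_id;	/* fill in only once final */
			f->os_desc_table = no_free_ptr(table);
			f->os_desc_n = 1;
		}
		return 0;
	}
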
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 7cec19d65fb5..7451e7cb7a85 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -19,6 +19,8 @@
#include <linux/atomic.h>
+#include <linux/usb/gadget.h>
+
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_rndis.h"
@@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
struct f_rndis_opts *rndis_opts;
+ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
+ struct usb_request *request __free(free_usb_request) = NULL;
if (!can_support_rndis(c))
return -EINVAL;
@@ -669,12 +673,9 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst);
if (cdev->use_os_string) {
- f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
- GFP_KERNEL);
- if (!f->os_desc_table)
+ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
+ if (!os_desc_table)
return -ENOMEM;
- f->os_desc_n = 1;
- f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
}
rndis_iad_descriptor.bFunctionClass = rndis_opts->class;
@@ -692,16 +693,14 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
gether_set_gadget(rndis_opts->net, cdev->gadget);
status = gether_register_netdev(rndis_opts->net);
if (status)
- goto fail;
+ return status;
rndis_opts->bound = true;
}
us = usb_gstrings_attach(cdev, rndis_strings,
ARRAY_SIZE(rndis_string_defs));
- if (IS_ERR(us)) {
- status = PTR_ERR(us);
- goto fail;
- }
+ if (IS_ERR(us))
+ return PTR_ERR(us);
rndis_control_intf.iInterface = us[0].id;
rndis_data_intf.iInterface = us[1].id;
rndis_iad_descriptor.iFunction = us[2].id;
@@ -709,36 +708,30 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
- goto fail;
+ return status;
rndis->ctrl_id = status;
rndis_iad_descriptor.bFirstInterface = status;
rndis_control_intf.bInterfaceNumber = status;
rndis_union_desc.bMasterInterface0 = status;
- if (cdev->use_os_string)
- f->os_desc_table[0].if_id =
- rndis_iad_descriptor.bFirstInterface;
-
status = usb_interface_id(c, f);
if (status < 0)
- goto fail;
+ return status;
rndis->data_id = status;
rndis_data_intf.bInterfaceNumber = status;
rndis_union_desc.bSlaveInterface0 = status;
- status = -ENODEV;
-
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
rndis->port.in_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
rndis->port.out_ep = ep;
/* NOTE: a status/notification endpoint is, strictly speaking,
@@ -747,21 +740,19 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
*/
ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
if (!ep)
- goto fail;
+ return -ENODEV;
rndis->notify = ep;
- status = -ENOMEM;
-
/* allocate notification request and buffer */
- rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
- if (!rndis->notify_req)
- goto fail;
- rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
- if (!rndis->notify_req->buf)
- goto fail;
- rndis->notify_req->length = STATUS_BYTECOUNT;
- rndis->notify_req->context = rndis;
- rndis->notify_req->complete = rndis_response_complete;
+ request = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+ request->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
+ if (!request->buf)
+ return -ENOMEM;
+ request->length = STATUS_BYTECOUNT;
+ request->context = rndis;
+ request->complete = rndis_response_complete;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -778,7 +769,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
eth_ss_function, eth_ss_function);
if (status)
- goto fail;
+ return status;
rndis->port.open = rndis_open;
rndis->port.close = rndis_close;
@@ -789,9 +780,18 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
if (rndis->manufacturer && rndis->vendorID &&
rndis_set_param_vendor(rndis->params, rndis->vendorID,
rndis->manufacturer)) {
- status = -EINVAL;
- goto fail_free_descs;
+ usb_free_all_descriptors(f);
+ return -EINVAL;
+ }
+
+ if (cdev->use_os_string) {
+ os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
+ os_desc_table[0].if_id = rndis_iad_descriptor.bFirstInterface;
+ f->os_desc_table = no_free_ptr(os_desc_table);
+ f->os_desc_n = 1;
}
+ rndis->notify_req = no_free_ptr(request);
/* NOTE: all that is done without knowing or caring about
* the network link ... which is unavailable to this code
@@ -802,21 +802,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
rndis->port.in_ep->name, rndis->port.out_ep->name,
rndis->notify->name);
return 0;
-
-fail_free_descs:
- usb_free_all_descriptors(f);
-fail:
- kfree(f->os_desc_table);
- f->os_desc_n = 0;
-
- if (rndis->notify_req) {
- kfree(rndis->notify_req->buf);
- usb_ep_free_request(rndis->notify, rndis->notify_req);
- }
-
- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
-
- return status;
}
void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index d709e24c1fd4..e3d63b8fa0f4 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -194,6 +194,9 @@ struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
req = ep->ops->alloc_request(ep, gfp_flags);
+ if (req)
+ req->ep = ep;
+
trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
return req;
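
[The one-line change in usb_ep_alloc_request() records the owning endpoint in the request itself, which is what lets a scope-based cleanup free a request from the pointer alone. The free_usb_request helper referenced by the function drivers above is presumably shaped roughly like the sketch below; this is an assumption about the helper, not a quote of the actual header, and the demo_* names are illustrative:]

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/usb/gadget.h>

	static inline void demo_free_usb_request(struct usb_request *req)
	{
		if (!req)
			return;

		kfree(req->buf);			/* a request owns its buffer, if any */
		usb_ep_free_request(req->ep, req);	/* req->ep set at allocation time */
	}

	DEFINE_FREE(demo_free_request, struct usb_request *, demo_free_usb_request(_T))

[Freeing req->buf alongside the request is what allows ecm_bind(), ncm_bind() and rndis_bind() to return -ENOMEM immediately after the kmalloc() of the notification buffer without any extra cleanup code.]
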