Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/zram/zram_drv.c | 8
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.c | 2
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r--  drivers/dpll/dpll_netlink.c | 4
-rw-r--r--  drivers/gpio/gpiolib-acpi-core.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 39
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 6
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_abi.h | 5
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 40
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue_types.h | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.c | 62
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 139
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_tile_sysfs.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_uc.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 4
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 1
-rw-r--r--  drivers/iommu/amd/init.c | 9
-rw-r--r--  drivers/iommu/amd/io_pgtable.c | 25
-rw-r--r--  drivers/iommu/intel/iommu.c | 7
-rw-r--r--  drivers/iommu/s390-iommu.c | 29
-rw-r--r--  drivers/md/dm-raid.c | 6
-rw-r--r--  drivers/md/dm-stripe.c | 10
-rw-r--r--  drivers/mmc/host/mvsdio.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 68
-rw-r--r--  drivers/mmc/host/sdhci-uhs2.c | 3
-rw-r--r--  drivers/mmc/host/sdhci.c | 34
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 80
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 22
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 6
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/wlan_cfg.c | 37
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/wlan_cfg.h | 5
-rw-r--r--  drivers/nvme/host/core.c | 18
-rw-r--r--  drivers/pcmcia/omap_cf.c | 8
-rw-r--r--  drivers/platform/x86/asus-nb-wmi.c | 25
-rw-r--r--  drivers/platform/x86/asus-wmi.h | 3
-rw-r--r--  drivers/power/supply/bq27xxx_battery.c | 4
72 files changed, 767 insertions(+), 253 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 54c57103715f..0f7bcc86b6f7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1794,6 +1794,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
u32 index)
{
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_SAME);
zram_set_handle(zram, index, fill);
zram_slot_unlock(zram, index);
@@ -1831,6 +1832,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
kunmap_local(src);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_HUGE);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, PAGE_SIZE);
@@ -1854,11 +1856,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
unsigned long element;
bool same_filled;
- /* First, free memory allocated to this slot (if any) */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- zram_slot_unlock(zram, index);
-
mem = kmap_local_page(page);
same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
@@ -1900,6 +1897,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
zcomp_stream_put(zstrm);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
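
All three hunks above make the same change: the slot's previous contents are now released inside the same zram_slot_lock() section that installs the new contents, rather than in a separate lock/unlock pair at the top of zram_write_page(). A minimal userspace model of that pattern, with invented names (struct slot, slot_install) standing in for the zram helpers:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct slot {
        pthread_mutex_t lock;
        void *payload;       /* stands in for the zsmalloc handle */
        unsigned long flags; /* stands in for ZRAM_SAME/ZRAM_HUGE/... */
};

/*
 * Free the old contents and publish the new ones in one critical section,
 * so no other path can observe a slot whose old state is gone but whose
 * new state is not yet written.
 */
static void slot_install(struct slot *s, void *new_payload, unsigned long new_flags)
{
        pthread_mutex_lock(&s->lock);
        free(s->payload);         /* the "zram_free_page()" step */
        s->payload = new_payload; /* the "zram_set_handle()" step */
        s->flags = new_flags;     /* the "zram_set_flag()" step */
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct slot s = { .lock = PTHREAD_MUTEX_INITIALIZER };

        slot_install(&s, strdup("first"), 0x1);
        slot_install(&s, strdup("second"), 0x2); /* frees "first" under the lock */
        free(s.payload);
        return 0;
}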
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 354c981943b6..4221b1888b38 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p &= (1 << cmp->p.width) - 1;
if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
- rate = (parent_rate / p) / m;
+ rate = (parent_rate / (p + cmp->p.offset)) / m;
else
rate = (parent_rate >> p) / m;
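
A worked example of the corrected formula with made-up register values: when CCU_FEATURE_DUAL_DIV is set, the P field is a linear divider with a hardware-specific offset, not a power-of-two shift, so the offset has to be added before dividing.

#include <stdio.h>

int main(void)
{
        unsigned long parent_rate = 24000000; /* 24 MHz parent, arbitrary */
        unsigned int p_field = 2, p_offset = 1, m = 3;

        /* old code ignored the offset: 24 MHz / 2 / 3 = 4 MHz */
        printf("without offset: %lu Hz\n", parent_rate / p_field / m);
        /* fixed: the divider is (p + offset): 24 MHz / 3 / 3 = 2.66 MHz */
        printf("with offset:    %lu Hz\n", parent_rate / (p_field + p_offset) / m);
        return 0;
}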
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e058ba027792..9f5ccc1720cb 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -2430,7 +2430,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
int error;
- __sev_platform_shutdown_locked(NULL);
+ __sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
/*
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index c130f87147fa..815de752bcd3 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -173,8 +173,8 @@ static int
dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
+ DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
- DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 };
enum dpll_clock_quality_level ql;
int ret;
@@ -183,7 +183,7 @@ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
if (ret)
return ret;
- for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX)
+ for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
return -EMSGSIZE;
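
The fix is a classic inclusive-range off-by-one: a bitmap that must represent quality levels 0 through DPLL_CLOCK_QUALITY_LEVEL_MAX needs MAX + 1 bits, and the iteration bound must match, otherwise the highest enumerator can never be reported. A standalone model with plain arrays in place of DECLARE_BITMAP()/for_each_set_bit() (QL_MAX is an arbitrary stand-in value):

#include <stdio.h>

#define QL_MAX 9 /* highest valid level; valid values are 0..QL_MAX */

/* room for bits 0..QL_MAX, i.e. QL_MAX + 1 bits */
static unsigned long qls[(QL_MAX + 1 + 63) / 64];

int main(void)
{
        int ql;

        qls[QL_MAX / 64] |= 1UL << (QL_MAX % 64); /* driver reports level QL_MAX */
        for (ql = 0; ql <= QL_MAX; ql++)          /* bound is inclusive */
                if (qls[ql / 64] & (1UL << (ql % 64)))
                        printf("quality level %d set\n", ql);
        return 0;
}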
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 12b24a717e43..d11bcaf1ae88 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -942,7 +942,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
@@ -992,7 +992,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
int ret;
for (i = 0, idx = 0; idx <= index; i++) {
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fe282b855734..31010040a12f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
- if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
- if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b7c3ec483407..861697490ac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -426,7 +426,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@@ -516,11 +518,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a57e8c5474bb..a65591f70b15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5055,7 +5055,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
- if (!adev->in_s0ix && !adev->in_runpm)
+ if (!adev->in_runpm)
amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
@@ -5075,10 +5075,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix) {
- amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- amdgpu_userq_suspend(adev);
- }
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ amdgpu_userq_suspend(adev);
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5141,15 +5139,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- if (r)
- goto exit;
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
- r = amdgpu_userq_resume(adev);
- if (r)
- goto exit;
- }
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -5162,7 +5158,7 @@ exit:
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
- if (!adev->in_s0ix && !r && !adev->in_runpm)
+ if (!r && !adev->in_runpm)
r = amdgpu_amdkfd_resume_process(adev);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 097bf6753782..f512879cb71c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1501,6 +1501,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.unhalt(node->dqm);
+ if (r) {
+ dev_err(kfd_device, "Error in starting scheduler\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@@ -1518,6 +1537,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.halt(node->dqm);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 312f6075e39d..58ea351dd48b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8689,7 +8689,17 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
-{
+{
+ /*
+ * We cannot be sure that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ * So we have to go through the CRTC to find the right IRQ.
+ */
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+ struct drm_device *dev = adev_to_drm(adev);
+
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8742,7 +8751,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
+#endif
+ }
+
} else {
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
+#endif
+ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+ }
+
drm_crtc_vblank_off(&acrtc->base);
}
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 26b8e232f858..2278a123db23 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2185,7 +2185,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
return ret;
}
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8a9079c2ed5c..8257132a8ee9 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2678,7 +2678,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@@ -2747,8 +2747,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
/* Add work function */
- if (platform->pdata.intp_irq)
+ if (platform->pdata.intp_irq) {
+ enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
+ }
if (platform->pdata.audio_en)
anx7625_register_audio(dev, platform);
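
The pattern adopted above, requesting the IRQ with IRQF_NO_AUTOEN so the line stays masked until probe has finished, then enabling it explicitly, in a hedged sketch (kernel context assumed; my_isr, my_probe_irq and "my-irq" are placeholders, not anx7625 code):

#include <linux/interrupt.h>

static irqreturn_t my_isr(int irq, void *data)
{
        /* ... handle the interrupt ... */
        return IRQ_HANDLED;
}

static int my_probe_irq(struct device *dev, int irq, void *priv)
{
        int ret;

        /*
         * IRQF_NO_AUTOEN leaves the line disabled after the request, so
         * the handler cannot run against half-initialised driver state.
         */
        ret = devm_request_threaded_irq(dev, irq, NULL, my_isr,
                                        IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
                                        IRQF_NO_AUTOEN, "my-irq", priv);
        if (ret)
                return ret;

        /* ... finish setting up everything the handler touches ... */

        enable_irq(irq); /* now it is safe to take interrupts */
        return 0;
}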
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index b431e7efd1f0..dbef0ca1a22a 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1984,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
- if (!mhdp_state->current_mode)
- return;
+ if (!mhdp_state->current_mode) {
+ ret = -EINVAL;
+ goto out;
+ }
drm_mode_set_name(mhdp_state->current_mode);
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 448afb86e05c..4d9896e14649 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -117,6 +117,7 @@ enum xe_guc_action {
XE_GUC_ACTION_ENTER_S_STATE = 0x501,
XE_GUC_ACTION_EXIT_S_STATE = 0x502,
XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+ XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
@@ -142,6 +143,7 @@ enum xe_guc_action {
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D,
+ XE_GUC_ACTION_OPT_IN_FEATURE_KLV = 0x550E,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
@@ -240,4 +242,7 @@ enum xe_guc_g2g_type {
#define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12)
#define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8)
+/* invalid type for XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR */
+#define XE_GUC_CAT_ERR_TYPE_INVALID 0xdeadbeef
+
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 7de8f827281f..89034bc97ec5 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -16,6 +16,8 @@
* +===+=======+==============================================================+
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
+ * | | | - `GuC Opt In Feature KLVs`_ |
+ * | | | - `GuC Scheduling Policies KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -125,6 +127,44 @@ enum {
};
/**
+ * DOC: GuC Opt In Feature KLVs
+ *
+ * `GuC KLV`_ keys available for use with OPT_IN_FEATURE_KLV
+ *
+ * _`GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE` : 0x4001
+ * Adds an extra dword to the XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR G2H
+ * containing the type of the CAT error. On HW that does not support
+ * reporting the CAT error type, the extra dword is set to 0xdeadbeef.
+ */
+
+#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY 0x4001
+#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN 0u
+
+/**
+ * DOC: GuC Scheduling Policies KLVs
+ *
+ * `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
+ *
+ * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
+ * Some platforms do not allow concurrent execution of RCS and CCS
+ * workloads from different address spaces. By default, the GuC prioritizes
+ * RCS submissions over CCS ones, which can lead to CCS workloads being
+ * significantly (or completely) starved of execution time. This KLV allows
+ * the driver to specify a quantum (in ms) and a ratio (percentage value
+ * between 0 and 100), and the GuC will prioritize the CCS for that
+ * percentage of each quantum. For example, specifying 100ms and 30% will
+ * make the GuC prioritize the CCS for 30ms of every 100ms.
+ * Note that this does not necessarily mean that RCS and CCS engines will
+ * only be active for their percentage of the quantum, as the restriction
+ * only kicks in if both classes are fully busy with non-compatible address
+ * spaces; i.e., if one engine is idle or running the same address space,
+ * a pending job on the other engine will still be submitted to the HW no
+ * matter what the ratio is.
+ */
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u
+
+/**
* DOC: GuC VGT Policy KLVs
*
* `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
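
For illustration, the on-the-wire form of the yield KLV documented above, using the 100 ms / 30% example from the text. Dword 0 packs the key into bits 31:16 and the length (in value dwords) into bits 15:0, per the KLV layout at the top of this file; the in-tree code does this packing with PREP_GUC_KLV_TAG(), which this standalone sketch open-codes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t klv[3];

        klv[0] = (0x1001u << 16) | 2u; /* RENDER_COMPUTE_YIELD key, len = 2 */
        klv[1] = 100;                  /* quantum: 100 ms */
        klv[2] = 30;                   /* ratio: CCS favoured 30 ms per 100 ms */

        for (int i = 0; i < 3; i++)
                printf("dword %d: 0x%08x\n", i, (unsigned int)klv[i]);
        return 0;
}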
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index fee22358cc09..e51169759614 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -151,6 +151,16 @@ err_lrc:
return err;
}
+static void __xe_exec_queue_fini(struct xe_exec_queue *q)
+{
+ int i;
+
+ q->ops->fini(q);
+
+ for (i = 0; i < q->width; ++i)
+ xe_lrc_put(q->lrc[i]);
+}
+
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hwe, u32 flags,
@@ -181,11 +191,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (xe_exec_queue_uses_pxp(q)) {
err = xe_pxp_exec_queue_add(xe->pxp, q);
if (err)
- goto err_post_alloc;
+ goto err_post_init;
}
return q;
+err_post_init:
+ __xe_exec_queue_fini(q);
err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
@@ -283,13 +295,11 @@ void xe_exec_queue_destroy(struct kref *ref)
xe_exec_queue_put(eq);
}
- q->ops->fini(q);
+ q->ops->destroy(q);
}
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
- int i;
-
/*
* Before releasing our ref to lrc and xef, accumulate our run ticks
* and wakeup any waiters.
@@ -298,9 +308,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
wake_up_var(&q->xef->exec_queue.pending_removal);
- for (i = 0; i < q->width; ++i)
- xe_lrc_put(q->lrc[i]);
-
+ __xe_exec_queue_fini(q);
__xe_exec_queue_free(q);
}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index cc1cffb5c87f..1c9d03f2a3e5 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -166,8 +166,14 @@ struct xe_exec_queue_ops {
int (*init)(struct xe_exec_queue *q);
/** @kill: Kill inflight submissions for backend */
void (*kill)(struct xe_exec_queue *q);
- /** @fini: Fini exec queue for submission backend */
+ /** @fini: Undoes the init() for submission backend */
void (*fini)(struct xe_exec_queue *q);
+ /**
+ * @destroy: Destroy exec queue for submission backend. The backend
+ * function must call xe_exec_queue_fini() (which will in turn call the
+ * fini() backend function) to ensure the queue is properly cleaned up.
+ */
+ void (*destroy)(struct xe_exec_queue *q);
/** @set_priority: Set priority for exec queue */
int (*set_priority)(struct xe_exec_queue *q,
enum xe_exec_queue_priority priority);
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 788f56b066b6..f83d421ac9d3 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -385,10 +385,20 @@ err_free:
return err;
}
-static void execlist_exec_queue_fini_async(struct work_struct *w)
+static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_execlist_exec_queue *exl = q->execlist;
+
+ drm_sched_entity_fini(&exl->entity);
+ drm_sched_fini(&exl->sched);
+
+ kfree(exl);
+}
+
+static void execlist_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_execlist_exec_queue *ee =
- container_of(w, struct xe_execlist_exec_queue, fini_async);
+ container_of(w, struct xe_execlist_exec_queue, destroy_async);
struct xe_exec_queue *q = ee->q;
struct xe_execlist_exec_queue *exl = q->execlist;
struct xe_device *xe = gt_to_xe(q->gt);
@@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- drm_sched_entity_fini(&exl->entity);
- drm_sched_fini(&exl->sched);
- kfree(exl);
-
xe_exec_queue_fini(q);
}
@@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
/* NIY */
}
-static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
{
- INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
- queue_work(system_unbound_wq, &q->execlist->fini_async);
+ INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
+ queue_work(system_unbound_wq, &q->execlist->destroy_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
.init = execlist_exec_queue_init,
.kill = execlist_exec_queue_kill,
.fini = execlist_exec_queue_fini,
+ .destroy = execlist_exec_queue_destroy,
.set_priority = execlist_exec_queue_set_priority,
.set_timeslice = execlist_exec_queue_set_timeslice,
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index 415140936f11..92c4ba52db0c 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
bool has_run;
- struct work_struct fini_async;
+ struct work_struct destroy_async;
enum xe_exec_queue_priority active_priority;
struct list_head active_link;
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index e3517ce2e18c..eaf7569a7c1d 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -41,6 +41,7 @@
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
+#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
@@ -97,7 +98,7 @@ void xe_gt_sanitize(struct xe_gt *gt)
* FIXME: if xe_uc_sanitize is called here, on TGL driver will not
* reload
*/
- gt->uc.guc.submission_state.enabled = false;
+ xe_guc_submit_disable(&gt->uc.guc);
}
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 53a44702c04a..c15dc600dcae 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1600,7 +1600,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
u64 fair;
fair = div_u64(available, num_vfs);
- fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
fair = min_t(u64, MAX_FAIR_LMEM, fair);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index bac5471a1a78..b9d21fdaad48 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -29,6 +29,7 @@
#include "xe_guc_db_mgr.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_relay.h"
@@ -570,6 +571,57 @@ err_deregister:
return err;
}
+static int __guc_opt_in_features_enable(struct xe_guc *guc, u64 addr, u32 num_dwords)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_OPT_IN_FEATURE_KLV,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ num_dwords
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+#define OPT_IN_MAX_DWORDS 16
+int xe_guc_opt_in_features_enable(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS);
+ u32 count = 0;
+ u32 *klvs;
+ int ret;
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ klvs = xe_guc_buf_cpu_ptr(buf);
+
+ /*
+ * The extra CAT error type opt-in was added in GuC v70.17.0, which maps
+ * to compatibility version v1.7.0.
+ * Note that the GuC allows enabling this KLV even on platforms that do
+ * not support the extra type; in such case the returned type variable
+ * will be set to a known invalid value which we can check against.
+ */
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0))
+ klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_EXT_CAT_ERR_TYPE);
+
+ if (count) {
+ xe_assert(xe, count <= OPT_IN_MAX_DWORDS);
+
+ ret = __guc_opt_in_features_enable(guc, xe_guc_buf_flush(buf), count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC opt-in features: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void guc_fini_hw(void *arg)
{
struct xe_guc *guc = arg;
@@ -763,15 +815,17 @@ int xe_guc_post_load_init(struct xe_guc *guc)
xe_guc_ads_populate_post_load(&guc->ads);
+ ret = xe_guc_opt_in_features_enable(guc);
+ if (ret)
+ return ret;
+
if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
ret = guc_g2g_start(guc);
if (ret)
return ret;
}
- guc->submission_state.enabled = true;
-
- return 0;
+ return xe_guc_submit_enable(guc);
}
int xe_guc_reset(struct xe_guc *guc)
@@ -1465,7 +1519,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
{
xe_uc_fw_sanitize(&guc->fw);
xe_guc_ct_disable(&guc->ct);
- guc->submission_state.enabled = false;
+ xe_guc_submit_disable(guc);
}
int xe_guc_reset_prepare(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 58338be44558..4a66575f017d 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -33,6 +33,7 @@ int xe_guc_reset(struct xe_guc *guc);
int xe_guc_upload(struct xe_guc *guc);
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc);
int xe_guc_enable_communication(struct xe_guc *guc);
+int xe_guc_opt_in_features_enable(struct xe_guc *guc);
int xe_guc_suspend(struct xe_guc *guc);
void xe_guc_notify(struct xe_guc *guc);
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr);
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index a3f421e2adc0..c30c0e3ccbbb 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -35,8 +35,8 @@ struct xe_guc_exec_queue {
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
/** @lr_tdr: long running TDR worker */
struct work_struct lr_tdr;
- /** @fini_async: do final fini async from this worker */
- struct work_struct fini_async;
+ /** @destroy_async: do final destroy async from this worker */
+ struct work_struct destroy_async;
/** @resume_time: time of last resume */
u64 resume_time;
/** @state: GuC specific state for this xe_exec_queue */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index ef3e9e1588f7..18ddbb7b98a1 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -32,6 +32,7 @@
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
@@ -316,6 +317,71 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
+/*
+ * Given that we want to guarantee enough RCS throughput to avoid missing
+ * frames, we set the yield policy to 20% of each 80ms interval.
+ */
+#define RC_YIELD_DURATION 80 /* in ms */
+#define RC_YIELD_RATIO 20 /* in percent */
+static u32 *emit_render_compute_yield_klv(u32 *emit)
+{
+ *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
+ *emit++ = RC_YIELD_DURATION;
+ *emit++ = RC_YIELD_RATIO;
+
+ return emit;
+}
+
+#define SCHEDULING_POLICY_MAX_DWORDS 16
+static int guc_init_global_schedule_policy(struct xe_guc *guc)
+{
+ u32 data[SCHEDULING_POLICY_MAX_DWORDS];
+ u32 *emit = data;
+ u32 count = 0;
+ int ret;
+
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
+ return 0;
+
+ *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
+
+ if (CCS_MASK(guc_to_gt(guc)))
+ emit = emit_render_compute_yield_klv(emit);
+
+ count = emit - data;
+ if (count > 1) {
+ xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
+
+ ret = xe_guc_ct_send_block(&guc->ct, data, count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC sheduling policies: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int xe_guc_submit_enable(struct xe_guc *guc)
+{
+ int ret;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ return ret;
+
+ guc->submission_state.enabled = true;
+
+ return 0;
+}
+
+void xe_guc_submit_disable(struct xe_guc *guc)
+{
+ guc->submission_state.enabled = false;
+}
+
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
int i;
@@ -1269,48 +1335,57 @@ rearm:
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-static void __guc_exec_queue_fini_async(struct work_struct *w)
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_guc_exec_queue *ge = q->guc;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ release_guc_id(guc, q);
+ xe_sched_entity_fini(&ge->entity);
+ xe_sched_fini(&ge->sched);
+
+ /*
+ * RCU free due to sched being exported via DRM scheduler fences
+ * (timeline name).
+ */
+ kfree_rcu(ge, rcu);
+}
+
+static void __guc_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_guc_exec_queue *ge =
- container_of(w, struct xe_guc_exec_queue, fini_async);
+ container_of(w, struct xe_guc_exec_queue, destroy_async);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
- release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
- xe_sched_entity_fini(&ge->entity);
- xe_sched_fini(&ge->sched);
- /*
- * RCU free due sched being exported via DRM scheduler fences
- * (timeline name).
- */
- kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
+
xe_pm_runtime_put(guc_to_xe(guc));
}
-static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
- INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+ INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
/* We must block on kernel engines so slabs are empty on driver unload */
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
- __guc_exec_queue_fini_async(&q->guc->fini_async);
+ __guc_exec_queue_destroy_async(&q->guc->destroy_async);
else
- queue_work(xe->destroy_wq, &q->guc->fini_async);
+ queue_work(xe->destroy_wq, &q->guc->destroy_async);
}
-static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
{
/*
* Might be done from within the GPU scheduler, need to do async as we
@@ -1319,7 +1394,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
* this we and don't really care when everything is fini'd, just that it
* is.
*/
- guc_exec_queue_fini_async(q);
+ guc_exec_queue_destroy_async(q);
}
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
@@ -1333,7 +1408,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
if (exec_queue_registered(q))
disable_scheduling_deregister(guc, q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
@@ -1566,14 +1641,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
#define STATIC_MSG_CLEANUP 0
#define STATIC_MSG_SUSPEND 1
#define STATIC_MSG_RESUME 2
-static void guc_exec_queue_fini(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
guc_exec_queue_add_msg(q, msg, CLEANUP);
else
- __guc_exec_queue_fini(exec_queue_to_guc(q), q);
+ __guc_exec_queue_destroy(exec_queue_to_guc(q), q);
}
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -1703,6 +1778,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.init = guc_exec_queue_init,
.kill = guc_exec_queue_kill,
.fini = guc_exec_queue_fini,
+ .destroy = guc_exec_queue_destroy,
.set_priority = guc_exec_queue_set_priority,
.set_timeslice = guc_exec_queue_set_timeslice,
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
@@ -1724,7 +1800,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else if (exec_queue_destroyed(q))
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
if (q->guc->suspend_pending) {
set_exec_queue_suspended(q);
@@ -1981,7 +2057,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
@@ -2078,12 +2154,16 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
u32 guc_id;
+ u32 type = XE_GUC_CAT_ERR_TYPE_INVALID;
- if (unlikely(len < 1))
+ if (unlikely(!len || len > 2))
return -EPROTO;
guc_id = msg[0];
+ if (len == 2)
+ type = msg[1];
+
if (guc_id == GUC_ID_UNKNOWN) {
/*
* GuC uses GUC_ID_UNKNOWN if it can not map the CAT fault to any PF/VF
@@ -2097,8 +2177,19 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
if (unlikely(!q))
return -EPROTO;
- xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
- xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+ /*
+ * The type is HW-defined and changes based on platform, so we don't
+ * decode it in the kernel and only check if it is valid.
+ * See bspec 54047 and 72187 for details.
+ */
+ if (type != XE_GUC_CAT_ERR_TYPE_INVALID)
+ xe_gt_dbg(gt,
+ "Engine memory CAT error [%u]: class=%s, logical_mask: 0x%x, guc_id=%d",
+ type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+ else
+ xe_gt_dbg(gt,
+ "Engine memory CAT error: class=%s, logical_mask: 0x%x, guc_id=%d",
+ xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
trace_xe_exec_queue_memory_cat_error(q);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 9b71a986c6ca..0d126b807c10 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -13,6 +13,8 @@ struct xe_exec_queue;
struct xe_guc;
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
+int xe_guc_submit_enable(struct xe_guc *guc);
+void xe_guc_submit_disable(struct xe_guc *guc);
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index b804234a6551..9e1236a9ec67 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
kt->tile = tile;
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
- if (err) {
- kobject_put(&kt->base);
- return err;
- }
+ if (err)
+ goto err_object;
tile->sysfs = &kt->base;
err = xe_vram_freq_sysfs_init(tile);
if (err)
- return err;
+ goto err_object;
return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
+
+err_object:
+ kobject_put(&kt->base);
+ return err;
}
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 3a8751a8b92d..5c45b0f072a4 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -165,6 +165,10 @@ static int vf_uc_init_hw(struct xe_uc *uc)
uc->guc.submission_state.enabled = true;
+ err = xe_guc_opt_in_features_enable(&uc->guc);
+ if (err)
+ return err;
+
err = xe_gt_record_default_lrcs(uc_to_gt(uc));
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 84052b98002d..92ce7374a79c 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -240,8 +240,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
pfence = xe_preempt_fence_create(q, q->lr.context,
++q->lr.seqno);
- if (!pfence) {
- err = -ENOMEM;
+ if (IS_ERR(pfence)) {
+ err = PTR_ERR(pfence);
goto out_fini;
}
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index ccbab3a4811a..a1086b6d1f11 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -551,6 +551,7 @@ struct gcr3_tbl_info {
};
struct amd_io_pgtable {
+ seqcount_t seqcount; /* Protects root/mode update */
struct io_pgtable pgtbl;
int mode;
u64 *root;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7add9bcf45dc..eef55aa4143c 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1450,12 +1450,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_FUNC(e->devid));
devid = e->devid;
- for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias)
+ if (alias) {
+ for (dev_i = devid_start; dev_i <= devid; ++dev_i)
pci_seg->alias_table[dev_i] = devid_to;
+ set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
}
set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
- set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@@ -3048,7 +3048,8 @@ static int __init early_amd_iommu_init(void)
if (!boot_cpu_has(X86_FEATURE_CX16)) {
pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 4d308c071134..5c6fdf38636a 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/seqlock.h>
#include <asm/barrier.h>
@@ -130,8 +131,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
+ write_seqcount_begin(&pgtable->seqcount);
pgtable->root = pte;
pgtable->mode += 1;
+ write_seqcount_end(&pgtable->seqcount);
+
amd_iommu_update_and_flush_device_table(domain);
pte = NULL;
@@ -153,6 +157,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
{
unsigned long last_addr = address + (page_size - 1);
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+ unsigned int seqcount;
int level, end_lvl;
u64 *pte, *page;
@@ -170,8 +175,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
}
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
+
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -249,6 +260,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long *page_size)
{
int level;
+ unsigned int seqcount;
u64 *pte;
*page_size = 0;
@@ -256,8 +268,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
if (address > PM_LEVEL_SIZE(pgtable->mode))
return NULL;
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -541,6 +557,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
+ seqcount_init(&pgtable->seqcount);
cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
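
The hunks above wrap the paired root/mode update in a write seqcount section and make readers retry until they see a stable snapshot of both fields. A userspace model of the protocol using C11 atomics, illustrative only: the kernel's seqcount_t additionally supplies the exact memory barriers, lockdep annotations and preemption handling, and the writer here is assumed to be serialized externally, as increase_address_space() is in the driver:

#include <stdatomic.h>

struct pgtable {
        atomic_uint seq;
        int mode;
        void *root;
};

static void writer_update(struct pgtable *p, void *new_root)
{
        atomic_fetch_add_explicit(&p->seq, 1, memory_order_release); /* odd: in progress */
        p->root = new_root;
        p->mode += 1;
        atomic_fetch_add_explicit(&p->seq, 1, memory_order_release); /* even: done */
}

static void reader_snapshot(struct pgtable *p, int *mode, void **root)
{
        unsigned int s;

        do {
                /* wait out an in-flight writer (odd count) */
                while ((s = atomic_load_explicit(&p->seq, memory_order_acquire)) & 1)
                        ;
                *mode = p->mode;
                *root = p->root;
                /* retry if the writer ran while we were reading */
        } while (atomic_load_explicit(&p->seq, memory_order_acquire) != s);
}

int main(void)
{
        struct pgtable p = { .mode = 3 };
        int mode;
        void *root;

        writer_update(&p, &p);
        reader_snapshot(&p, &mode, &root);
        return mode == 4 ? 0 : 1;
}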
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 34dd175a331d..3dd4d73fcb5d 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1592,6 +1592,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long lvl_pages = lvl_to_nr_pages(level);
struct dma_pte *pte = NULL;
+ if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
+ !IS_ALIGNED(end_pfn + 1, lvl_pages)))
+ return;
+
while (start_pfn <= end_pfn) {
if (!pte)
pte = pfn_to_dma_pte(domain, start_pfn, &level,
@@ -1667,7 +1671,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long pages_to_remove;
pteval |= DMA_PTE_LARGE_PAGE;
- pages_to_remove = min_t(unsigned long, nr_pages,
+ pages_to_remove = min_t(unsigned long,
+ round_down(nr_pages, lvl_pages),
nr_pte_to_next_page(pte) * lvl_pages);
end_pfn = iov_pfn + pages_to_remove - 1;
switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
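
A worked example of the rounding added above, with made-up numbers: at a super-page level covering lvl_pages small pages, the span handed to switch_to_super_page() must be a whole multiple of lvl_pages, or end_pfn + 1 would fail the new alignment check.

#include <stdio.h>

int main(void)
{
        unsigned long lvl_pages = 512; /* 4K pages per 2M super-page */
        unsigned long nr_pages = 1000; /* pages left to map, arbitrary */

        unsigned long unrounded = nr_pages;                       /* old bound: 1000 */
        unsigned long rounded = nr_pages / lvl_pages * lvl_pages; /* new bound: 512 */

        printf("unrounded span: %lu pages (misaligned end)\n", unrounded);
        printf("rounded span:   %lu pages (whole super-pages only)\n", rounded);
        return 0;
}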
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 433b59f43530..cc9ec636a7a2 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -611,6 +611,23 @@ static u64 get_iota_region_flag(struct s390_domain *domain)
}
}
+static bool reg_ioat_propagate_error(int cc, u8 status)
+{
+ /*
+ * If the device is in the error state the reset routine
+ * will register the IOAT of the newly set domain on re-enable
+ */
+ if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ return false;
+ /*
+ * If the device was removed treat registration as success
+ * and let the subsequent error event trigger tear down.
+ */
+ if (cc == ZPCI_CC_INVAL_HANDLE)
+ return false;
+ return cc != ZPCI_CC_OK;
+}
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -695,7 +712,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev->dma_table = s390_domain->dma_table;
zdev_s390_domain_update(zdev, domain);
@@ -1031,7 +1048,8 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
lockdep_assert_held(&zdev->dom_lock);
- if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
return NULL;
s390_domain = to_s390_domain(zdev->s390_domain);
@@ -1122,12 +1140,7 @@ static int s390_attach_dev_identity(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
-
- /*
- * If the device is undergoing error recovery the reset code
- * will re-establish the new domain.
- */
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev_s390_domain_update(zdev, domain);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9835f2fe26e9..1e09b4860a45 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3810,8 +3810,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
struct raid_set *rs = ti->private;
unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
- limits->io_min = chunk_size_bytes;
- limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+ if (chunk_size_bytes) {
+ limits->io_min = chunk_size_bytes;
+ limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+ }
}
static void raid_presuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 5bbbdf8fc1bd..2313dbaf6424 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -456,11 +456,15 @@ static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
- unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ unsigned int io_min, io_opt;
limits->chunk_sectors = sc->chunk_size;
- limits->io_min = chunk_size;
- limits->io_opt = chunk_size * sc->stripes;
+
+ if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) &&
+ !check_mul_overflow(io_min, sc->stripes, &io_opt)) {
+ limits->io_min = io_min;
+ limits->io_opt = io_opt;
+ }
}
static struct target_type stripe_target = {
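
A standalone sketch of the overflow checks the hunk relies on: io_min and io_opt are only updated when chunk_size << SECTOR_SHIFT and the multiplication by the stripe count both fit in 32 bits. The helpers below open-code what the kernel's check_shl_overflow()/check_mul_overflow() provide, and the values are deliberately large enough to trip the check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool shl_overflows(uint32_t v, unsigned int shift, uint32_t *out)
{
        if (shift >= 32) {
                *out = 0;
                return true;
        }
        *out = v << shift;
        return (*out >> shift) != v;
}

static bool mul_overflows(uint32_t a, uint32_t b, uint32_t *out)
{
        uint64_t wide = (uint64_t)a * b;

        *out = (uint32_t)wide;
        return wide > UINT32_MAX;
}

int main(void)
{
        uint32_t chunk_sectors = 1u << 27; /* absurdly large chunk, in sectors */
        uint32_t stripes = 64, io_min, io_opt;

        if (!shl_overflows(chunk_sectors, 9, &io_min) && /* sectors -> bytes */
            !mul_overflows(io_min, stripes, &io_opt))
                printf("io_min=%u io_opt=%u\n", io_min, io_opt);
        else
                printf("overflow: limits left at their defaults\n"); /* taken here */
        return 0;
}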
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 101f36de7b63..8165fa4d0d93 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
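
The rule behind this one-liner is a general DMA API contract: dma_unmap_sg() must be passed the nents value that was originally given to dma_map_sg() (data->sg_len here), not the possibly smaller mapped count that dma_map_sg() returned (host->sg_frags here). A hedged sketch of the correct pairing (kernel context assumed; do_transfer is a placeholder):

#include <linux/dma-mapping.h>

static int do_transfer(struct device *dev, struct scatterlist *sg, int nents)
{
        int mapped = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);

        if (!mapped)
                return -EIO;

        /* ... program the controller with the 'mapped' entries ... */

        /* unmap with the original nents, never with 'mapped' */
        dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
        return 0;
}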
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 3a1de477e9af..b0f91cc9e40e 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -283,6 +283,8 @@
#define PCIE_GLI_9767_UHS2_CTL2_ZC_VALUE 0xb
#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL BIT(6)
#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL_VALUE 0x1
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN BIT(13)
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE BIT(14)
#define GLI_MAX_TUNING_LOOP 40
@@ -1179,6 +1181,65 @@ static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable)
gl9767_vhs_read(pdev);
}
+static void sdhci_gl9767_uhs2_phy_reset(struct sdhci_host *host, bool assert)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value, set, clr;
+
+ if (assert) {
+ /* Assert reset, set RESETN and clean RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ } else {
+ /* De-assert reset, clean RESETN and set RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ }
+
+ gl9767_vhs_write(pdev);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, &value);
+ value |= set;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ value &= ~clr;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ gl9767_vhs_read(pdev);
+}
+
+static void __gl9767_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ u8 pwr = 0;
+
+ if (mode != MMC_POWER_OFF) {
+ pwr = sdhci_get_vdd_value(vdd);
+ if (!pwr)
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ pwr |= SDHCI_VDD2_POWER_180;
+ }
+
+ if (host->pwr == pwr)
+ return;
+
+ host->pwr = pwr;
+
+ if (pwr == 0) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ } else {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ pwr |= SDHCI_POWER_ON;
+ sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+
+ /* Assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, true);
+ pwr |= SDHCI_VDD2_POWER_ON;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+ }
+}
+
static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -1205,6 +1266,11 @@ static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
}
sdhci_enable_clk(host, clk);
+
+ if (mmc_card_uhs2(host->mmc))
+ /* De-assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, false);
+
gl9767_set_low_power_negotiation(pdev, true);
}
@@ -1476,7 +1542,7 @@ static void sdhci_gl9767_set_power(struct sdhci_host *host, unsigned char mode,
gl9767_vhs_read(pdev);
sdhci_gli_overcurrent_event_enable(host, false);
- sdhci_uhs2_set_power(host, mode, vdd);
+ __gl9767_uhs2_set_power(host, mode, vdd);
sdhci_gli_overcurrent_event_enable(host, true);
} else {
gl9767_vhs_write(pdev);
diff --git a/drivers/mmc/host/sdhci-uhs2.c b/drivers/mmc/host/sdhci-uhs2.c
index 0efeb9d0c376..c459a08d01da 100644
--- a/drivers/mmc/host/sdhci-uhs2.c
+++ b/drivers/mmc/host/sdhci-uhs2.c
@@ -295,7 +295,8 @@ static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd);
- sdhci_set_clock(host, host->clock);
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
}
static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e116f2db34d5..be701eb25fec 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2367,23 +2367,6 @@ void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios)
(ios->power_mode == MMC_POWER_UP) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
-
- if (!ios->clock || ios->clock != host->clock) {
- host->ops->set_clock(host, ios->clock);
- host->clock = ios->clock;
-
- if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
- host->clock) {
- host->timeout_clk = mmc->actual_clock ?
- mmc->actual_clock / 1000 :
- host->clock / 1000;
- mmc->max_busy_timeout =
- host->ops->get_max_timeout_count ?
- host->ops->get_max_timeout_count(host) :
- 1 << 27;
- mmc->max_busy_timeout /= host->timeout_clk;
- }
- }
}
EXPORT_SYMBOL_GPL(sdhci_set_ios_common);
@@ -2410,6 +2393,23 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_set_ios_common(mmc, ios);
+ if (!ios->clock || ios->clock != host->clock) {
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
+
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
+ host->clock) {
+ host->timeout_clk = mmc->actual_clock ?
+ mmc->actual_clock / 1000 :
+ host->clock / 1000;
+ mmc->max_busy_timeout =
+ host->ops->get_max_timeout_count ?
+ host->ops->get_max_timeout_count(host) :
+ 1 << 27;
+ mmc->max_busy_timeout /= host->timeout_clk;
+ }
+ }
+
if (host->ops->set_power)
host->ops->set_power(host, ios->power_mode, ios->vdd);
else
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c4d53e8e7c15..e23195dd7477 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2115,6 +2115,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
} else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ bond_has_slaves(bond) &&
memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
/* Set slave to random address to avoid duplicate mac
* address in later fail over.
@@ -3338,7 +3339,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
/* Find out through which dev should the packet go */
memset(&fl6, 0, sizeof(struct flowi6));
fl6.daddr = targets[i];
- fl6.flowi6_oif = bond->dev->ifindex;
dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
if (dst->error) {
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index a9040c42d2ff..6e97a5a7daaf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_bnx2x_delete_wait(dev, 0);
- cancel_delayed_work(&cp->delete_task);
- flush_workqueue(cnic_wq);
+ cancel_delayed_work_sync(&cp->delete_task);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
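
The replacement above closes a teardown race: cancel_delayed_work() does not wait for a handler that is already running, and flushing the workqueue cannot stop the work from re-arming itself in the meantime. cancel_delayed_work_sync() both cancels and waits, and also prevents self-requeueing while it waits. A minimal sketch of the pattern (kernel context assumed; names are placeholders):

#include <linux/workqueue.h>

struct my_ctx {
        struct delayed_work delete_task;
};

static void my_teardown(struct my_ctx *ctx)
{
        /* guarantees the handler is not running and will not run again */
        cancel_delayed_work_sync(&ctx->delete_task);
}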
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index de8a6ce86ad7..12105ffb5dac 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);
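
A standalone demonstration of why the (u64) cast matters: without it the comparison result is shifted as a plain int, so for iq_no of 32 and above the shift is undefined behaviour and the intended 64-bit mask bit is lost. Widening before the shift keeps the full width.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int is_64b = 1;          /* (conf->instr_type == 64) evaluates to int */
        unsigned int iq_no = 40; /* any queue number above 31 hits the bug */
        uint64_t iq64B = 0;

        /*
         * The old expression (is_64b << iq_no) would shift a 32-bit int by
         * 40 bits, which is undefined; cast to a 64-bit type first.
         */
        iq64B |= (uint64_t)is_64b << iq_no;
        printf("iq64B mask: 0x%016llx\n", (unsigned long long)iq64B);
        return 0;
}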
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 4643a3380618..b1e1ad9e4b48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -2736,7 +2736,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
goto err_get_attr;
}
- ethsw->bpid = dpbp_attrs.id;
+ ethsw->bpid = dpbp_attrs.bpid;
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c006f716a3bd..ca7517a68a2c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -947,9 +947,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (!eop_desc)
break;
- /* prevent any other reads prior to eop_desc */
- smp_rmb();
-
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c50cf3ad190e..4766597ac555 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -865,10 +865,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
- /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
- * can pop off frags but driver has to handle it on its own
- */
- rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@@ -939,20 +935,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
/**
* ice_get_pgcnts - grab page_count() for gathered fragments
* @rx_ring: Rx descriptor ring to store the page counts on
+ * @ntc: the next to clean element (not included in this frame!)
*
* This function is intended to be called right before running the XDP
* program so that the page recycling mechanism can make a correct
* decision about the underlying pages; this matters because the XDP
* program can change a page's refcount
*/
-static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
{
- u32 nr_frags = rx_ring->nr_frags + 1;
u32 idx = rx_ring->first_desc;
struct ice_rx_buf *rx_buf;
u32 cnt = rx_ring->count;
- for (int i = 0; i < nr_frags; i++) {
+ while (idx != ntc) {
rx_buf = &rx_ring->rx_buf[idx];
rx_buf->pgcnt = page_count(rx_buf->page);
@@ -1125,62 +1121,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
- * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in the frame
* @rx_ring: Rx ring with all the auxiliary data
* @xdp: XDP buffer carrying linear + frags part
- * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
- * @ntc: a current next_to_clean value to be stored at rx_ring
+ * @ntc: the next to clean element (not included in this frame!)
* @verdict: return code from XDP program execution
*
- * Walk through gathered fragments and satisfy internal page
- * recycle mechanism; we take here an action related to verdict
- * returned by XDP program;
+ * Called after the XDP program has completed, or on error with the verdict
+ * set to ICE_XDP_CONSUMED.
+ *
+ * Walk through the buffers from first_desc to the end of the frame, releasing
+ * them and satisfying the internal page recycling mechanism. The action taken
+ * depends on the verdict from the XDP program.
*/
static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- u32 *xdp_xmit, u32 ntc, u32 verdict)
+ u32 ntc, u32 verdict)
{
- u32 nr_frags = rx_ring->nr_frags + 1;
u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
- u32 post_xdp_frags = 1;
struct ice_rx_buf *buf;
- int i;
+ u32 xdp_frags = 0;
+ int i = 0;
if (unlikely(xdp_buff_has_frags(xdp)))
- post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- for (i = 0; i < post_xdp_frags; i++) {
+ while (idx != ntc) {
buf = &rx_ring->rx_buf[idx];
+ if (++idx == cnt)
+ idx = 0;
- if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ /* An XDP program could release fragments from the end of the
+ * buffer. For these, we need to keep the pagecnt_bias as-is.
+ * To do this, only adjust pagecnt_bias for fragments up to
+ * the total remaining after the XDP program has run.
+ */
+ if (verdict != ICE_XDP_CONSUMED)
ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- *xdp_xmit |= verdict;
- } else if (verdict & ICE_XDP_CONSUMED) {
+ else if (i++ <= xdp_frags)
buf->pagecnt_bias++;
- } else if (verdict == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
ice_put_rx_buf(rx_ring, buf);
-
- if (++idx == cnt)
- idx = 0;
- }
- /* handle buffers that represented frags released by XDP prog;
- * for these we keep pagecnt_bias as-is; refcount from struct page
- * has been decremented within XDP prog and we do not have to increase
- * the biased refcnt
- */
- for (; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- ice_put_rx_buf(rx_ring, buf);
- if (++idx == cnt)
- idx = 0;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
}
/**
@@ -1260,6 +1245,10 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
+ /* Increment ntc before calls to ice_put_rx_mbuf() */
+ if (++ntc == cnt)
+ ntc = 0;
+
if (!xdp->data) {
void *hard_start;
@@ -1268,24 +1257,23 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
- ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED);
break;
}
- if (++ntc == cnt)
- ntc = 0;
/* skip if it is NOP desc */
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_get_pgcnts(rx_ring);
+ ice_get_pgcnts(rx_ring, ntc);
xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
+ xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR);
continue;
construct_skb:
@@ -1298,7 +1286,7 @@ construct_skb:
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
xdp_verdict = ICE_XDP_CONSUMED;
}
- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
if (!skb)
break;
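
After this rework, ice_get_pgcnts() and ice_put_rx_mbuf() both derive the frame's buffer set the same way: walk the ring from first_desc up to, but not including, ntc, wrapping at the ring size, instead of trusting a separately tracked nr_frags. A standalone sketch of that walk (sizes and indices illustrative):

    #include <stdio.h>

    #define RING_SIZE 8

    int main(void)
    {
        unsigned int first_desc = 6, ntc = 2;  /* frame wraps the ring end */
        unsigned int idx = first_desc;

        /* Visit every descriptor belonging to the frame; ntc itself is
         * not part of the frame, mirroring the ice_get_pgcnts() loop. */
        while (idx != ntc) {
            printf("processing buffer %u\n", idx);  /* 6, 7, 0, 1 */
            if (++idx == RING_SIZE)
                idx = 0;
        }
        return 0;
    }
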
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a4b1e9514632..07155e615f75 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -358,7 +358,6 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
- u32 nr_frags;
u16 max_frame;
u16 rx_buf_len;
dma_addr_t dma; /* physical address of ring */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 859a15e4ccba..1bbe7f72757c 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -343,6 +343,7 @@ struct igc_adapter {
/* LEDs */
struct mutex led_mutex;
struct igc_led_classdev *leds;
+ bool leds_available;
};
void igc_up(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 1b4465d6b2b7..5b8f9b512148 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7301,8 +7301,14 @@ static int igc_probe(struct pci_dev *pdev,
if (IS_ENABLED(CONFIG_IGC_LEDS)) {
err = igc_led_setup(adapter);
- if (err)
- goto err_register;
+ if (err) {
+ netdev_warn_once(netdev,
+ "LED init failed (%d); continuing without LED support\n",
+ err);
+ adapter->leds_available = false;
+ } else {
+ adapter->leds_available = true;
+ }
}
return 0;
@@ -7358,7 +7364,7 @@ static void igc_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
- if (IS_ENABLED(CONFIG_IGC_LEDS))
+ if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
igc_led_free(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
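
The igc change is a graceful-degradation pattern: an optional feature's init failure is logged once, recorded in a flag, and the matching teardown is gated on that flag rather than aborting the whole probe. A condensed sketch reusing the names from the patch; the surrounding probe flow is elided and hypothetical:

    /* Sketch: non-fatal init of an optional feature, paired teardown
     * gated on adapter->leds_available as in the hunks above. */
    static void example_led_init(struct igc_adapter *adapter)
    {
        int err = igc_led_setup(adapter);

        adapter->leds_available = !err;
        if (err)
            netdev_warn_once(adapter->netdev,
                             "LED init failed (%d); continuing without LEDs\n",
                             err);
    }
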
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cba860f0e1f1..d5c421451f31 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6801,6 +6801,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
break;
}
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
+
#ifdef IXGBE_FCOE
/* FCoE support exists, always init the FCoE lock */
spin_lock_init(&adapter->fcoe.lock);
@@ -11474,10 +11481,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /* Make sure the SWFW semaphore is in a valid state */
- if (hw->mac.ops.init_swfw_sync)
- hw->mac.ops.init_swfw_sync(hw);
-
if (ixgbe_check_fw_error(adapter))
return ixgbe_recovery_probe(adapter);
@@ -11681,8 +11684,6 @@ skip_sriov:
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
- if (hw->mac.type == ixgbe_mac_e610)
- mutex_init(&hw->aci.lock);
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
if (ixgbe_removed(hw->hw_addr)) {
@@ -11838,9 +11839,9 @@ err_register:
devl_unlock(adapter->devlink);
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
+err_sw_init:
if (hw->mac.type == ixgbe_mac_e610)
mutex_destroy(&adapter->hw.aci.lock);
-err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
@@ -11891,10 +11892,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
- if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
ixgbe_disable_link_status_events(adapter);
- mutex_destroy(&adapter->hw.aci.lock);
- }
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -11954,6 +11953,9 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
+
if (disable_dev)
pci_disable_device(pdev);
}
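
All three ixgbe hunks enforce one lifetime rule for aci.lock: initialize it before any code path that might take it (including the error unwind, hence the relocated err_sw_init label) and destroy it only after the last possible user, past free_netdev(). A userspace analogue of the same ordering with pthreads:

    #include <pthread.h>
    #include <stdio.h>

    struct dev {
        pthread_mutex_t lock;
    };

    static int dev_probe(struct dev *d)
    {
        pthread_mutex_init(&d->lock, NULL);  /* first: before any user,
                                              * including error unwinding */
        /* ... setup that may take d->lock, and may fail ... */
        return 0;
    }

    static void dev_remove(struct dev *d)
    {
        /* ... teardown that may still take d->lock ... */
        pthread_mutex_destroy(&d->lock);     /* last: after all users */
    }

    int main(void)
    {
        struct dev d;

        dev_probe(&d);
        dev_remove(&d);
        printf("ok\n");
        return 0;
    }
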
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 24499bb36c00..bcea3fc26a8c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1124,11 +1124,24 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static bool octep_is_vf_valid(struct octep_device *oct, int vf)
+{
+ if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) {
+ netdev_err(oct->netdev, "Invalid VF ID %d\n", vf);
+ return false;
+ }
+
+ return true;
+}
+
static int octep_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi)
{
struct octep_device *oct = netdev_priv(dev);
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
ivi->vf = vf;
ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
ivi->spoofchk = true;
@@ -1143,6 +1156,9 @@ static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
struct octep_device *oct = netdev_priv(dev);
int err;
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
if (!is_valid_ether_addr(mac)) {
dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
return -EADDRNOTAVAIL;
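
octep_is_vf_valid() is plain input validation at the ndo boundary: reject an out-of-range VF index before it can index vf_info[]. A standalone sketch; ACTIVE_VFS stands in for CFG_GET_ACTIVE_VFS(oct->conf), and the `vf >= 0` check is extra caution not present in the patch:

    #include <stdbool.h>
    #include <stdio.h>

    #define ACTIVE_VFS 4  /* illustrative bound */

    /* Guard added to every VF ndo entry point: validate the index
     * before touching per-VF state. */
    static bool vf_is_valid(int vf)
    {
        return vf >= 0 && vf < ACTIVE_VFS;
    }

    int main(void)
    {
        printf("%d %d\n", vf_is_valid(3), vf_is_valid(7));  /* 1 0 */
        return 0;
    }
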
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index ebecdd29f3bd..0867fab61b19 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -196,6 +196,7 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
vf_id);
return;
}
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -205,6 +206,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
{
int err;
+ /* Reset VF-specific information maintained by the PF */
+ memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
err = octep_ctrl_net_dev_remove(oct, vf_id);
if (err) {
rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 63130ba37e9d..69b435ed8fbb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 9560fcba643f..ac65e3191480 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -92,6 +92,7 @@ enum {
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
MLX5E_ACCEL_FS_POL_FT_LEVEL,
+ MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index ffcd0cdeb775..23703f28386a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -185,6 +185,7 @@ struct mlx5e_ipsec_rx_create_attr {
u32 family;
int prio;
int pol_level;
+ int pol_miss_level;
int sa_level;
int status_level;
enum mlx5_flow_namespace_type chains_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 98b6a3a623f9..65dc3529283b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -747,6 +747,7 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
attr->family = family;
attr->prio = MLX5E_NIC_PRIO;
attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
+ attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL;
attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
@@ -833,7 +834,7 @@ static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft_attr.level = attr->pol_level;
+ ft_attr.level = attr->pol_miss_level;
ft_attr.prio = attr->prio;
ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e39c51cfc8e6..f0142d32b648 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -136,8 +136,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
- NULL, NULL, NULL);
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 63a7a788fb0d..cd0242eb008c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1506,12 +1506,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = mlx5_uplink_netdev_get(dev);
+ if (!netdev)
+ return 0;
+ priv = netdev_priv(netdev);
rpriv->netdev = priv->netdev;
- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
+ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
+ mlx5_uplink_netdev_put(dev, netdev);
+ return err;
}
static void
@@ -1638,8 +1647,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- void *ppriv = priv->ppriv;
+ struct mlx5e_priv *priv;
+ void *ppriv;
+
+ if (!netdev) {
+ ppriv = rpriv;
+ goto free_ppriv;
+ }
+
+ priv = netdev_priv(netdev);
+ ppriv = priv->ppriv;
if (rep->vport == MLX5_VPORT_UPLINK) {
mlx5e_vport_uplink_rep_unload(rpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index ad9f6fca9b6a..c6476e943e98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -743,6 +743,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
speed = lksettings.base.speed;
out:
+ mlx5_uplink_netdev_put(mdev, slave);
return speed;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 29ce09af59ae..3b57ef6b3de3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -114,9 +114,9 @@
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
- * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
+ * IPsec policy miss, {IPsec RoCE MPV, Alias table}, IPsec RoCE policy
*/
-#define KERNEL_NIC_PRIO_NUM_LEVELS 10
+#define KERNEL_NIC_PRIO_NUM_LEVELS 11
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc, and one more for promisc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 37d5f445598c..a7486e6d0d5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -52,7 +52,20 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
- return mdev->mlx5e_res.uplink_netdev;
+ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
+ struct net_device *netdev;
+
+ mutex_lock(&mlx5e_res->uplink_netdev_lock);
+ netdev = mlx5e_res->uplink_netdev;
+ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
+ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
+ return netdev;
+}
+
+static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ netdev_put(netdev, &mdev->mlx5e_res.tracker);
}
struct mlx5_sd;
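
With this change every mlx5_uplink_netdev_get() must be paired with mlx5_uplink_netdev_put(), since the getter now takes a tracked reference under the lock. A hypothetical caller showing the expected pattern; only the get/put helpers are from the patch:

    /* Hypothetical caller; mirrors the check-and-put flow that the
     * en_rep.c hunks above adopt. */
    static int example_query_uplink_mtu(struct mlx5_core_dev *mdev)
    {
        struct net_device *netdev = mlx5_uplink_netdev_get(mdev);
        int mtu;

        if (!netdev)
            return -ENODEV;

        mtu = netdev->mtu;  /* use the netdev while the ref is held */
        mlx5_uplink_netdev_put(mdev, netdev);
        return mtu;
    }
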
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 2d7adf7444ba..aa9f2b0a77d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1170,7 +1170,11 @@ const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
force_legacy);
i = find_first_bit(&temp, max_size);
- if (i < max_size)
+
+ /* mlx5e_link_info has holes. Check that speed
+ * is non-zero as an indication of one.
+ */
+ if (i < max_size && table[i].speed)
return &table[i];
return NULL;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 56d5464222d9..cdbf82affa7b 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
unsigned next_rx;
- int rx_rc, len;
+ int len;
u32 cmdsts;
__le32 *desc;
unsigned long flags;
@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb_put(skb, len);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
goto netdev_mangle_me_harder_failed;
+ }
if (cmdsts & CMDSTS_DEST_MULTI)
ndev->stats.multicast++;
ndev->stats.rx_packets++;
@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
}
#endif
- rx_rc = netif_rx(skb);
- if (NET_RX_DROP == rx_rc) {
-netdev_mangle_me_harder_failed:
- ndev->stats.rx_dropped++;
- }
+ netif_rx(skb);
} else {
dev_kfree_skb_irq(skb);
}
+netdev_mangle_me_harder_failed:
nr++;
next_rx = info->next_rx;
desc = info->descs + (DESC_SIZE * next_rx);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 9c3d3dd2f847..1f0cea3cae92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -4462,10 +4462,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Add override window info to buffer */
+ /* Add override window info to buffer, preventing buffer overflow */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
+ PROTECTION_OVERRIDE_DEPTH_DWORDS);
if (override_window_dwords) {
addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
offset += qed_grc_dump_addr_range(p_hwfn,
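
The qed fix clamps a size read back from hardware before using it to fill a fixed buffer. The same defensive rule in standalone form; the 1024 bound is illustrative, standing in for PROTECTION_OVERRIDE_DEPTH_DWORDS:

    #include <stdio.h>

    #define DEPTH_DWORDS 1024  /* allocated buffer size, illustrative */

    /* Never trust a count reported by hardware: clamp it to the
     * buffer that was actually allocated, as the patch does with min(). */
    static unsigned int clamp_window(unsigned int hw_reported)
    {
        return hw_reported < DEPTH_DWORDS ? hw_reported : DEPTH_DWORDS;
    }

    int main(void)
    {
        printf("%u\n", clamp_window(5000));  /* -> 1024, not 5000 */
        return 0;
    }
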
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8e6ce16ab5b8..c9e2dca30831 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
- if (test_bit(MT76_MCU_RESET, &dev->phy.state))
+ if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
return;
spin_lock_bh(&dev->sta_poll_lock);
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index 131388886acb..cfabd5aebb54 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = {
};
static const struct wilc_cfg_str g_cfg_str[] = {
- {WID_FIRMWARE_VERSION, NULL},
- {WID_MAC_ADDR, NULL},
- {WID_ASSOC_RES_INFO, NULL},
- {WID_NIL, NULL}
+ {WID_FIRMWARE_VERSION, 0, NULL},
+ {WID_MAC_ADDR, 0, NULL},
+ {WID_ASSOC_RES_INFO, 0, NULL},
+ {WID_NIL, 0, NULL}
};
#define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
switch (FIELD_GET(WILC_WID_TYPE, wid)) {
case WID_CHAR:
+ len = 3;
+ if (len + 2 > size)
+ return;
+
while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
if (cfg->b[i].id == wid)
cfg->b[i].val = info[4];
- len = 3;
break;
case WID_SHORT:
+ len = 4;
+ if (len + 2 > size)
+ return;
+
while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
i++;
if (cfg->hw[i].id == wid)
cfg->hw[i].val = get_unaligned_le16(&info[4]);
- len = 4;
break;
case WID_INT:
+ len = 6;
+ if (len + 2 > size)
+ return;
+
while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
i++;
if (cfg->w[i].id == wid)
cfg->w[i].val = get_unaligned_le32(&info[4]);
- len = 6;
break;
case WID_STR:
+ len = 2 + get_unaligned_le16(&info[2]);
+
while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
i++;
- if (cfg->s[i].id == wid)
+ if (cfg->s[i].id == wid) {
+ if (len > cfg->s[i].len || (len + 2 > size))
+ return;
+
memcpy(cfg->s[i].str, &info[2],
- get_unaligned_le16(&info[2]) + 2);
+ len);
+ }
- len = 2 + get_unaligned_le16(&info[2]);
break;
default:
@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl)
/* store the string cfg parameters */
wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
wl->cfg.s[i].str = str_vals->firmware_version;
+ wl->cfg.s[i].len = sizeof(str_vals->firmware_version);
i++;
wl->cfg.s[i].id = WID_MAC_ADDR;
wl->cfg.s[i].str = str_vals->mac_address;
+ wl->cfg.s[i].len = sizeof(str_vals->mac_address);
i++;
wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
wl->cfg.s[i].str = str_vals->assoc_rsp;
+ wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp);
i++;
wl->cfg.s[i].id = WID_NIL;
wl->cfg.s[i].str = NULL;
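
The parser changes all follow one rule: validate a record's length against both the received frame and the destination buffer before consuming it. A simplified standalone sketch of that rule; the field layout here (length at offset 2, payload after it) is illustrative and not exactly the WID encoding:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Reject the record if it does not fit in the received frame or
     * in the destination, mirroring the checks the patch adds. */
    static int copy_str_field(uint8_t *dst, size_t dst_len,
                              const uint8_t *info, size_t size)
    {
        uint16_t len;

        if (size < 4)
            return -1;
        len = (uint16_t)(info[2] | (info[3] << 8)); /* little-endian */
        if ((size_t)len + 4 > size || len > dst_len)
            return -1;  /* truncated frame or oversized record */

        memcpy(dst, &info[4], len);
        return (int)len;
    }

    int main(void)
    {
        uint8_t frame[] = { 0x00, 0x13, 0x03, 0x00, 'a', 'b', 'c' };
        uint8_t out[8];

        printf("%d\n", copy_str_field(out, sizeof(out),
                                      frame, sizeof(frame)));  /* 3 */
        return 0;
    }
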
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
index 7038b74f8e8f..5ae74bced7d7 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
@@ -24,12 +24,13 @@ struct wilc_cfg_word {
struct wilc_cfg_str {
u16 id;
+ u16 len;
u8 *str;
};
struct wilc_cfg_str_vals {
- u8 mac_address[7];
- u8 firmware_version[129];
+ u8 mac_address[8];
+ u8 firmware_version[130];
u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
};
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 895fb163d48e..5395623d2ba6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -903,6 +903,15 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
u32 upper, lower;
u64 ref48;
+ /* only type 1 and type 2 PI formats have a reftag */
+ switch (ns->head->pi_type) {
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ break;
+ default:
+ return;
+ }
+
/* both rw and write zeroes share the same reftag format */
switch (ns->head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
@@ -942,13 +951,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
if (nvme_ns_has_pi(ns->head)) {
cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
-
- switch (ns->head->pi_type) {
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- nvme_set_ref_tag(ns, cmnd, req);
- break;
- }
+ nvme_set_ref_tag(ns, cmnd, req);
}
return BLK_STS_OK;
@@ -1039,6 +1042,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT;
+ nvme_set_ref_tag(ns, cmnd, req);
}
if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
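
Moving the PI-type switch into nvme_set_ref_tag() lets every caller, including the new nvme_setup_rw() site, call it unconditionally; the function is now a no-op for formats without a reference tag. Separately, the upper/lower/ref48 locals in the context above handle a 48-bit tag split across two 32-bit command fields; a sketch of that arithmetic with an illustrative value:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative 48-bit reference tag split into two 32-bit
         * halves, mirroring the upper/lower/ref48 variables above. */
        uint64_t ref48 = 0x123456789abcULL;
        uint32_t lower = (uint32_t)ref48;
        uint32_t upper = (uint32_t)(ref48 >> 32);

        uint64_t back = ((uint64_t)upper << 32) | lower;
        printf("%" PRIx64 "\n", back);  /* 123456789abc */
        return 0;
    }
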
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 441cdf83f5a4..d6f24c7d1562 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -304,7 +304,13 @@ static void __exit omap_cf_remove(struct platform_device *pdev)
kfree(cf);
}
-static struct platform_driver omap_cf_driver = {
+/*
+ * omap_cf_remove() lives in .exit.text. For drivers registered via
+ * platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver omap_cf_driver __refdata = {
.driver = {
.name = driver_name,
},
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index e6726be5890e..ce177c379941 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -147,7 +147,12 @@ static struct quirk_entry quirk_asus_ignore_fan = {
};
static struct quirk_entry quirk_asus_zenbook_duo_kbd = {
- .ignore_key_wlan = true,
+ .key_wlan_event = ASUS_WMI_KEY_IGNORE,
+};
+
+static struct quirk_entry quirk_asus_z13 = {
+ .key_wlan_event = ASUS_WMI_KEY_ARMOURY,
+ .tablet_switch_mode = asus_wmi_kbd_dock_devid,
};
static int dmi_matched(const struct dmi_system_id *dmi)
@@ -539,6 +544,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_zenbook_duo_kbd,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ROG Z13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow Z13"),
+ },
+ .driver_data = &quirk_asus_z13,
+ },
{},
};
@@ -636,6 +650,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_IGNORE, 0xCF, }, /* AC mode */
{ KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
{ KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */
+ { KE_KEY, ASUS_WMI_KEY_ARMOURY, { KEY_PROG3 } },
{ KE_END, 0},
};
@@ -655,9 +670,11 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
if (atkbd_reports_vol_keys)
*code = ASUS_WMI_KEY_IGNORE;
break;
- case 0x5F: /* Wireless console Disable */
- if (quirks->ignore_key_wlan)
- *code = ASUS_WMI_KEY_IGNORE;
+ case 0x5D: /* Wireless console Toggle */
+ case 0x5E: /* Wireless console Enable / Keyboard Attach, Detach */
+ case 0x5F: /* Wireless console Disable / Special Key */
+ if (quirks->key_wlan_event)
+ *code = quirks->key_wlan_event;
break;
}
}
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 018dfde4025e..5cd4392b964e 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -18,6 +18,7 @@
#include <linux/i8042.h>
#define ASUS_WMI_KEY_IGNORE (-1)
+#define ASUS_WMI_KEY_ARMOURY 0xffff01
#define ASUS_WMI_BRN_DOWN 0x2e
#define ASUS_WMI_BRN_UP 0x2f
@@ -40,7 +41,7 @@ struct quirk_entry {
bool wmi_force_als_set;
bool wmi_ignore_fan;
bool filter_i8042_e1_extended_codes;
- bool ignore_key_wlan;
+ int key_wlan_event;
enum asus_wmi_tablet_switch_mode tablet_switch_mode;
int wapf;
/*
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 93dcebbe1141..ad2d9ecf32a5 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1919,8 +1919,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
- if ((cache.flags & 0xff) == 0xff)
- cache.flags = -1; /* read error */
+ if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
+ cache.flags = -ENODEV; /* bq27000 hdq read error */
if (cache.flags >= 0) {
cache.capacity = bq27xxx_battery_read_soc(di);
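
The battery hunk narrows the all-ones test to the one chip (bq27000 over HDQ) where 0xff really means a failed read, and stores a genuine negative errno so the following `cache.flags >= 0` check still works. The errno-in-an-int convention in isolation:

    #include <errno.h>
    #include <stdio.h>

    /* An int field carries either a non-negative register value or a
     * negative errno, so callers test `flags >= 0` instead of a
     * magic -1. */
    int main(void)
    {
        int flags = -ENODEV;

        if (flags >= 0)
            printf("flags: %#x\n", flags);
        else
            printf("read failed: %d\n", flags);  /* -19 */
        return 0;
    }
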