author | Jiri Kosina <jkosina@suse.com> | 2025-03-26 13:42:07 +0100
---|---|---
committer | Jiri Kosina <jkosina@suse.com> | 2025-03-26 13:42:07 +0100
commit | b3cc7428a32202936904b5b07cf9f135025bafd6 (patch) |
tree | d4a1a6180ac5939fccd92acd6f8d7d1388575c4a /drivers/fpga/dfl-fme-error.c |
parent | db52926fb0be40e1d588a346df73f5ea3a34a4c6 (diff) |
parent | 01601fdd40ecf4467c8ae4d215dbb7d2a0599a2c (diff) |
Merge branch 'for-6.15/amd_sfh' into for-linus
From: Mario Limonciello <mario.limonciello@amd.com>
Some platforms include a human presence detection (HPD) sensor. When it is
enabled and a user is detected, the sensor fusion hub emits a wake event
that software can react to. Example use cases are "wake from suspend on
approach" or "lock when leaving".

This is currently enabled by default on supported systems, but users can't
control it. In effect, wake on approach is always on, which is surprising
behavior for users who don't expect it.

Instead of defaulting to enabled, add a sysfs knob that lets users enable
the feature if they want it, and set it to disabled by default.
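For readers unfamiliar with this kind of control, the knob described above boils down to a writable device attribute backed by a driver flag that starts out false. The following is only a minimal sketch of that shape; the names (`struct sfh_data`, `hpd_enable`) are hypothetical and do not reflect the actual amd_sfh attribute or its semantics.

```c
/*
 * Minimal sketch of a sysfs enable/disable knob that defaults to disabled.
 * All names here (sfh_data, hpd_enable) are hypothetical illustrations,
 * not the real amd_sfh interface.
 */
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

struct sfh_data {
	bool hpd_enabled;	/* false at probe time: feature off by default */
};

static ssize_t hpd_enable_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct sfh_data *priv = dev_get_drvdata(dev);

	/* Report the current state of the flag. */
	return sysfs_emit(buf, "%u\n", priv->hpd_enabled);
}

static ssize_t hpd_enable_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct sfh_data *priv = dev_get_drvdata(dev);
	bool enable;

	if (kstrtobool(buf, &enable))
		return -EINVAL;

	/* A real driver would also program the sensor hub here. */
	priv->hpd_enabled = enable;

	return count;
}
static DEVICE_ATTR_RW(hpd_enable);
```

Writing `1`/`on` or `0`/`off` to such an attribute flips the flag, and reading it back reports the current state; the important point from the commit message is only that the flag starts out disabled.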
Diffstat (limited to 'drivers/fpga/dfl-fme-error.c')
-rw-r--r-- | drivers/fpga/dfl-fme-error.c | 98 |
1 file changed, 51 insertions(+), 47 deletions(-)
```diff
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index 51c2892ec06d5..f00d949efe69b 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -42,15 +42,15 @@
 static ssize_t pcie0_errors_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	u64 value;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	value = readq(base + PCIE0_ERROR);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 
 	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
 }
@@ -59,7 +59,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
 				  struct device_attribute *attr,
 				  const char *buf, size_t count)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	int ret = 0;
 	u64 v, val;
@@ -67,9 +67,9 @@ static ssize_t pcie0_errors_store(struct device *dev,
 	if (kstrtou64(buf, 0, &val))
 		return -EINVAL;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
 	v = readq(base + PCIE0_ERROR);
 
@@ -79,7 +79,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
 		ret = -EINVAL;
 
 	writeq(0ULL, base + PCIE0_ERROR_MASK);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 	return ret ? ret : count;
 }
 static DEVICE_ATTR_RW(pcie0_errors);
@@ -87,15 +87,15 @@ static DEVICE_ATTR_RW(pcie0_errors);
 static ssize_t pcie1_errors_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	u64 value;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	value = readq(base + PCIE1_ERROR);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 
 	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
 }
@@ -104,7 +104,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
 				  struct device_attribute *attr,
 				  const char *buf, size_t count)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	int ret = 0;
 	u64 v, val;
@@ -112,9 +112,9 @@ static ssize_t pcie1_errors_store(struct device *dev,
 	if (kstrtou64(buf, 0, &val))
 		return -EINVAL;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
 	v = readq(base + PCIE1_ERROR);
 
@@ -124,7 +124,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
 		ret = -EINVAL;
 
 	writeq(0ULL, base + PCIE1_ERROR_MASK);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 	return ret ? ret : count;
 }
 static DEVICE_ATTR_RW(pcie1_errors);
@@ -132,9 +132,10 @@ static DEVICE_ATTR_RW(pcie1_errors);
 static ssize_t nonfatal_errors_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
 	return sprintf(buf, "0x%llx\n",
 		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
@@ -144,9 +145,10 @@ static DEVICE_ATTR_RO(nonfatal_errors);
 static ssize_t catfatal_errors_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
 	return sprintf(buf, "0x%llx\n",
 		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
@@ -156,15 +158,15 @@ static DEVICE_ATTR_RO(catfatal_errors);
 static ssize_t inject_errors_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	u64 v;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	v = readq(base + RAS_ERROR_INJECT);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 
 	return sprintf(buf, "0x%llx\n",
 		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
@@ -174,7 +176,7 @@ static ssize_t inject_errors_store(struct device *dev,
 				   struct device_attribute *attr,
 				   const char *buf, size_t count)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	u8 inject_error;
 	u64 v;
@@ -185,14 +187,14 @@ static ssize_t inject_errors_store(struct device *dev,
 	if (inject_error & ~INJECT_ERROR_MASK)
 		return -EINVAL;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	v = readq(base + RAS_ERROR_INJECT);
 	v &= ~INJECT_ERROR_MASK;
 	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
 	writeq(v, base + RAS_ERROR_INJECT);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 
 	return count;
 }
@@ -201,15 +203,15 @@ static DEVICE_ATTR_RW(inject_errors);
 static ssize_t fme_errors_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	u64 value;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	value = readq(base + FME_ERROR);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 
 	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
 }
@@ -218,7 +220,7 @@ static ssize_t fme_errors_store(struct device *dev,
 			       struct device_attribute *attr,
 			       const char *buf, size_t count)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	u64 v, val;
 	int ret = 0;
@@ -226,9 +228,9 @@ static ssize_t fme_errors_store(struct device *dev,
 	if (kstrtou64(buf, 0, &val))
 		return -EINVAL;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
 	v = readq(base + FME_ERROR);
 
@@ -240,7 +242,7 @@ static ssize_t fme_errors_store(struct device *dev,
 	/* Workaround: disable MBP_ERROR if feature revision is 0 */
 	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
 	       base + FME_ERROR_MASK);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 	return ret ? ret : count;
 }
 static DEVICE_ATTR_RW(fme_errors);
@@ -248,15 +250,15 @@ static DEVICE_ATTR_RW(fme_errors);
 static ssize_t first_error_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	u64 value;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	value = readq(base + FME_FIRST_ERROR);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 
 	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
 }
@@ -265,15 +267,15 @@ static DEVICE_ATTR_RO(first_error);
 static ssize_t next_error_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 	u64 value;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 	value = readq(base + FME_NEXT_ERROR);
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 
 	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
 }
@@ -295,12 +297,14 @@ static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
 					    struct attribute *attr, int n)
 {
 	struct device *dev = kobj_to_dev(kobj);
+	struct dfl_feature_dev_data *fdata;
 
+	fdata = to_dfl_feature_dev_data(dev);
 	/*
 	 * sysfs entries are visible only if related private feature is
 	 * enumerated.
 	 */
-	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+	if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
 		return 0;
 
 	return attr->mode;
@@ -314,12 +318,12 @@ const struct attribute_group fme_global_err_group = {
 
 static void fme_err_mask(struct device *dev, bool mask)
 {
-	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
 	void __iomem *base;
 
-	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
 
-	mutex_lock(&pdata->lock);
+	mutex_lock(&fdata->lock);
 
 	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
 	if (dfl_feature_revision(base))
@@ -332,7 +336,7 @@ static void fme_err_mask(struct device *dev, bool mask)
 	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
 	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
 
-	mutex_unlock(&pdata->lock);
+	mutex_unlock(&fdata->lock);
 }
 
 static int fme_global_err_init(struct platform_device *pdev,
```
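Every hunk above applies the same mechanical substitution: fetch the DFL feature device data with `to_dfl_feature_dev_data()` instead of `dev_get_platdata()`, pass it to `dfl_get_feature_ioaddr_by_id()`, and serialize register access with `fdata->lock` instead of `pdata->lock`. Reassembled from the first hunk, a converted handler ends up in the shape below; the comments are added here for orientation and are not part of the patch.

```c
/* Post-change form of one handler, assembled from the pcie0_errors_show() hunk. */
static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 value;

	/* Look up the Global Error private feature's MMIO region. */
	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	/* The 64-bit register read is serialized by the feature-data mutex. */
	mutex_lock(&fdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
```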