Diffstat (limited to 'drivers'): 258 files changed, 2214 insertions, 1438 deletions
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index d50261d05f3a..515b20d0b698 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file) return 0; } -static int acpi_aml_read_user(char __user *buf, int len) +static ssize_t acpi_aml_read_user(char __user *buf, size_t len) { - int ret; struct circ_buf *crc = &acpi_aml_io.out_crc; - int n; + ssize_t ret; + size_t n; char *p; ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER); @@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len) /* sync head before removing logs */ smp_rmb(); p = &crc->buf[crc->tail]; - n = min(len, circ_count_to_end(crc)); + n = min_t(size_t, len, circ_count_to_end(crc)); if (copy_to_user(buf, p, n)) { ret = -EFAULT; goto out; @@ -599,8 +599,8 @@ out: static ssize_t acpi_aml_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - int ret = 0; - int size = 0; + ssize_t ret = 0; + ssize_t size = 0; if (!count) return 0; @@ -639,11 +639,11 @@ again: return size > 0 ? size : ret; } -static int acpi_aml_write_user(const char __user *buf, int len) +static ssize_t acpi_aml_write_user(const char __user *buf, size_t len) { - int ret; struct circ_buf *crc = &acpi_aml_io.in_crc; - int n; + ssize_t ret; + size_t n; char *p; ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER); @@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len) /* sync tail before inserting cmds */ smp_mb(); p = &crc->buf[crc->head]; - n = min(len, circ_space_to_end(crc)); + n = min_t(size_t, len, circ_space_to_end(crc)); if (copy_from_user(p, buf, n)) { ret = -EFAULT; goto out; @@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len) ret = n; out: acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0); - return n; + return ret; } static ssize_t acpi_aml_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - int ret = 0; - int size = 0; + ssize_t ret = 0; + ssize_t size = 0; if (!count) return 0; diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c index b831cb8e53dc..b75ceaab716e 100644 --- a/drivers/acpi/acpi_tad.c +++ b/drivers/acpi/acpi_tad.c @@ -565,6 +565,9 @@ static void acpi_tad_remove(struct platform_device *pdev) pm_runtime_get_sync(dev); + if (dd->capabilities & ACPI_TAD_RT) + sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group); + if (dd->capabilities & ACPI_TAD_DC_WAKE) sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group); diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 6f4fe47c955b..35460c2072a4 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -1141,7 +1141,7 @@ struct acpi_port_info { #define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91 #define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92 #define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93 -#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94 +#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93 /***************************************************************************** * diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 989dc01af03f..bc205b330904 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void) return_ACPI_STATUS(AE_OK); } + if (!acpi_gbl_use_global_lock) { + return_ACPI_STATUS(AE_OK); + } + /* Attempt installation of the global lock handler */ status = 
acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 65fa3444367a..11c7e35fafa2 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -92,7 +92,7 @@ enum { struct acpi_battery { struct mutex lock; - struct mutex sysfs_lock; + struct mutex update_lock; struct power_supply *bat; struct power_supply_desc bat_desc; struct acpi_device *device; @@ -903,15 +903,12 @@ static int sysfs_add_battery(struct acpi_battery *battery) static void sysfs_remove_battery(struct acpi_battery *battery) { - mutex_lock(&battery->sysfs_lock); - if (!battery->bat) { - mutex_unlock(&battery->sysfs_lock); + if (!battery->bat) return; - } + battery_hook_remove_battery(battery); power_supply_unregister(battery->bat); battery->bat = NULL; - mutex_unlock(&battery->sysfs_lock); } static void find_battery(const struct dmi_header *dm, void *private) @@ -1071,6 +1068,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data) if (!battery) return; + + guard(mutex)(&battery->update_lock); + old = battery->bat; /* * On Acer Aspire V5-573G notifications are sometimes triggered too @@ -1093,21 +1093,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data) } static int battery_notify(struct notifier_block *nb, - unsigned long mode, void *_unused) + unsigned long mode, void *_unused) { struct acpi_battery *battery = container_of(nb, struct acpi_battery, pm_nb); - int result; - switch (mode) { - case PM_POST_HIBERNATION: - case PM_POST_SUSPEND: + if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) { + guard(mutex)(&battery->update_lock); + if (!acpi_battery_present(battery)) return 0; if (battery->bat) { acpi_battery_refresh(battery); } else { + int result; + result = acpi_battery_get_info(battery); if (result) return result; @@ -1119,7 +1120,6 @@ static int battery_notify(struct notifier_block *nb, acpi_battery_init_alarm(battery); acpi_battery_get_state(battery); - break; } return 0; @@ -1197,6 +1197,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery) { int retry, ret; + guard(mutex)(&battery->update_lock); + for (retry = 5; retry; retry--) { ret = acpi_battery_update(battery, false); if (!ret) @@ -1207,6 +1209,13 @@ static int acpi_battery_update_retry(struct acpi_battery *battery) return ret; } +static void sysfs_battery_cleanup(struct acpi_battery *battery) +{ + guard(mutex)(&battery->update_lock); + + sysfs_remove_battery(battery); +} + static int acpi_battery_add(struct acpi_device *device) { int result = 0; @@ -1218,15 +1227,21 @@ static int acpi_battery_add(struct acpi_device *device) if (device->dep_unmet) return -EPROBE_DEFER; - battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL); + battery = devm_kzalloc(&device->dev, sizeof(*battery), GFP_KERNEL); if (!battery) return -ENOMEM; battery->device = device; strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME); strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS); device->driver_data = battery; - mutex_init(&battery->lock); - mutex_init(&battery->sysfs_lock); + result = devm_mutex_init(&device->dev, &battery->lock); + if (result) + return result; + + result = devm_mutex_init(&device->dev, &battery->update_lock); + if (result) + return result; + if (acpi_has_method(battery->device->handle, "_BIX")) set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); @@ -1253,10 +1268,7 @@ fail_pm: device_init_wakeup(&device->dev, 0); unregister_pm_notifier(&battery->pm_nb); fail: - sysfs_remove_battery(battery); - 
mutex_destroy(&battery->lock); - mutex_destroy(&battery->sysfs_lock); - kfree(battery); + sysfs_battery_cleanup(battery); return result; } @@ -1275,11 +1287,10 @@ static void acpi_battery_remove(struct acpi_device *device) device_init_wakeup(&device->dev, 0); unregister_pm_notifier(&battery->pm_nb); - sysfs_remove_battery(battery); - mutex_destroy(&battery->lock); - mutex_destroy(&battery->sysfs_lock); - kfree(battery); + guard(mutex)(&battery->update_lock); + + sysfs_remove_battery(battery); } #ifdef CONFIG_PM_SLEEP @@ -1296,6 +1307,9 @@ static int acpi_battery_resume(struct device *dev) return -EINVAL; battery->update_time = 0; + + guard(mutex)(&battery->update_lock); + acpi_battery_update(battery, true); return 0; } diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index ae035b93da08..3eb56b77cb6d 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2637,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, if (ndr_desc->target_node == NUMA_NO_NODE) { ndr_desc->target_node = phys_to_target_node(spa->address); dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", - NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); + NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end); } /* diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 0888e4d618d5..b524cf27213d 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1410,6 +1410,9 @@ int acpi_processor_power_init(struct acpi_processor *pr) if (retval) { if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); + + per_cpu(acpi_cpuidle_device, pr->id) = NULL; + kfree(dev); return retval; } acpi_processor_registered++; diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index e9186339f6e6..342e7cef723c 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, struct fwnode_handle *parent) { struct acpi_data_node *dn; + acpi_handle scope = NULL; bool result; if (acpi_graph_ignore_port(handle)) @@ -98,29 +99,35 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, INIT_LIST_HEAD(&dn->data.properties); INIT_LIST_HEAD(&dn->data.subnodes); - result = acpi_extract_properties(handle, desc, &dn->data); - - if (handle) { - acpi_handle scope; - acpi_status status; + /* + * The scope for the completion of relative pathname segments and + * subnode object lookup is the one of the namespace node (device) + * containing the object that has returned the package. That is, it's + * the scope of that object's parent device. + */ + if (handle) + acpi_get_parent(handle, &scope); - /* - * The scope for the subnode object lookup is the one of the - * namespace node (device) containing the object that has - * returned the package. That is, it's the scope of that - * object's parent. - */ - status = acpi_get_parent(handle, &scope); - if (ACPI_SUCCESS(status) - && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, - &dn->fwnode)) - result = true; - } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data, - &dn->fwnode)) { + /* + * Extract properties from the _DSD-equivalent package pointed to by + * desc and use scope (if not NULL) for the completion of relative + * pathname segments. + * + * The extracted properties will be held in the new data node dn. 
+ */ + result = acpi_extract_properties(scope, desc, &dn->data); + /* + * Look for subnodes in the _DSD-equivalent package pointed to by desc + * and create child nodes of dn if there are any. + */ + if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode)) result = true; - } if (result) { + /* + * This will be NULL if the desc package is embedded in an outer + * _DSD-equivalent package and its scope cannot be determined. + */ dn->handle = handle; dn->data.pointer = desc; list_add_tail(&dn->sibling, list); @@ -132,35 +139,21 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, return false; } -static bool acpi_nondev_subnode_data_ok(acpi_handle handle, - const union acpi_object *link, - struct list_head *list, - struct fwnode_handle *parent) -{ - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; - acpi_status status; - - status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf, - ACPI_TYPE_PACKAGE); - if (ACPI_FAILURE(status)) - return false; - - if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, - parent)) - return true; - - ACPI_FREE(buf.pointer); - return false; -} - static bool acpi_nondev_subnode_ok(acpi_handle scope, const union acpi_object *link, struct list_head *list, struct fwnode_handle *parent) { + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; acpi_handle handle; acpi_status status; + /* + * If the scope is unknown, the _DSD-equivalent package being parsed + * was embedded in an outer _DSD-equivalent package as a result of + * direct evaluation of an object pointed to by a reference. In that + * case, using a pathname as the target object pointer is invalid. + */ if (!scope) return false; @@ -169,7 +162,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope, if (ACPI_FAILURE(status)) return false; - return acpi_nondev_subnode_data_ok(handle, link, list, parent); + status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf, + ACPI_TYPE_PACKAGE); + if (ACPI_FAILURE(status)) + return false; + + if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, + parent)) + return true; + + ACPI_FREE(buf.pointer); + return false; } static bool acpi_add_nondev_subnodes(acpi_handle scope, @@ -180,9 +183,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope, bool ret = false; int i; + /* + * Every element in the links package is expected to represent a link + * to a non-device node in a tree containing device-specific data. + */ for (i = 0; i < links->package.count; i++) { union acpi_object *link, *desc; - acpi_handle handle; bool result; link = &links->package.elements[i]; @@ -190,26 +196,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope, if (link->package.count != 2) continue; - /* The first one must be a string. */ + /* The first one (the key) must be a string. */ if (link->package.elements[0].type != ACPI_TYPE_STRING) continue; - /* The second one may be a string, a reference or a package. */ + /* The second one (the target) may be a string or a package. */ switch (link->package.elements[1].type) { case ACPI_TYPE_STRING: + /* + * The string is expected to be a full pathname or a + * pathname segment relative to the given scope. That + * pathname is expected to point to an object returning + * a package that contains _DSD-equivalent information. 
+ */ result = acpi_nondev_subnode_ok(scope, link, list, parent); break; - case ACPI_TYPE_LOCAL_REFERENCE: - handle = link->package.elements[1].reference.handle; - result = acpi_nondev_subnode_data_ok(handle, link, list, - parent); - break; case ACPI_TYPE_PACKAGE: + /* + * This happens when a reference is used in AML to + * point to the target. Since the target is expected + * to be a named object, a reference to it will cause it + * to be avaluated in place and its return package will + * be embedded in the links package at the location of + * the reference. + * + * The target package is expected to contain _DSD- + * equivalent information, but the scope in which it + * is located in the original AML is unknown. Thus + * it cannot contain pathname segments represented as + * strings because there is no way to build full + * pathnames out of them. + */ + acpi_handle_debug(scope, "subnode %s: Unknown scope\n", + link->package.elements[0].string.pointer); desc = &link->package.elements[1]; result = acpi_nondev_subnode_extract(desc, NULL, link, list, parent); break; + case ACPI_TYPE_LOCAL_REFERENCE: + /* + * It is not expected to see any local references in + * the links package because referencing a named object + * should cause it to be evaluated in place. + */ + acpi_handle_info(scope, "subnode %s: Unexpected reference\n", + link->package.elements[0].string.pointer); + fallthrough; default: result = false; break; @@ -369,6 +402,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data) struct acpi_data_node *dn; list_for_each_entry(dn, &data->subnodes, sibling) { + if (!dn->handle) + continue; + acpi_detach_data(dn->handle, acpi_nondev_subnode_tag); acpi_untie_nondev_subnodes(&dn->data); @@ -383,6 +419,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data) acpi_status status; bool ret; + if (!dn->handle) + continue; + status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn); if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) { acpi_handle_err(dn->handle, "Can't tag data node\n"); diff --git a/drivers/base/node.c b/drivers/base/node.c index eb72580288e6..deccfe68214e 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -879,6 +879,10 @@ int __register_one_node(int nid) node_devices[nid] = node; error = register_node(node_devices[nid], nid); + if (error) { + node_devices[nid] = NULL; + return error; + } /* link cpu under this node */ for_each_present_cpu(cpu) { diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index faf4cdec23f0..abb16a5bb796 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -628,8 +628,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy if (dev->power.syscore || dev->power.direct_complete) goto Out; - if (!dev->power.is_noirq_suspended) + if (!dev->power.is_noirq_suspended) { + /* + * This means that system suspend has been aborted in the noirq + * phase before invoking the noirq suspend callback for the + * device, so if device_suspend_late() has left it in suspend, + * device_resume_early() should leave it in suspend either in + * case the early resume of it depends on the noirq resume that + * has not run. 
+ */ + if (dev_pm_skip_suspend(dev)) + dev->power.must_resume = false; + goto Out; + } if (!dpm_wait_for_superior(dev, async)) goto Out; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index de4e2f3db942..66b3840bd96e 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -828,7 +828,7 @@ struct regmap *__regmap_init(struct device *dev, map->read_flag_mask = bus->read_flag_mask; } - if (config && config->read && config->write) { + if (config->read && config->write) { map->reg_read = _regmap_bus_read; if (config->reg_update_bits) map->reg_update_bits = config->reg_update_bits; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index db9b5164ccca..dd4117192201 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -536,8 +536,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, return -EBADF; error = loop_check_backing_file(file); - if (error) + if (error) { + fput(file); return error; + } /* suppress uevents while reconfiguring the device */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); @@ -973,8 +975,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, return -EBADF; error = loop_check_backing_file(file); - if (error) + if (error) { + fput(file); return error; + } is_loop = is_loop_device(file); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index c705acc4d6f4..de692eed9874 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1156,6 +1156,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, if (!sock) return NULL; + if (!sk_is_tcp(sock->sk) && + !sk_is_stream_unix(sock->sk)) { + dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n"); + *err = -EINVAL; + sockfd_put(sock); + return NULL; + } + if (sock->ops->shutdown == sock_no_shutdown) { dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); *err = -EINVAL; diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index f10369ad90f7..ceb7aeca5d9b 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu static unsigned long g_cache_size; module_param_named(cache_size, g_cache_size, ulong, 0444); -MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)"); +MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. 
Default: 0 (none)"); static bool g_fua = true; module_param_named(fua, g_fua, bool, 0444); diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 4575d9a4e5ed..dbc8d8f14ce7 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -1103,6 +1103,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) * Get physical address of MC portal for the root DPRC: */ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!plat_res) + return -EINVAL; + mc_portal_phys_addr = plat_res->start; mc_portal_size = resource_size(plat_res); mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff; diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index b3eafcf2a2c5..cdea24e92919 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, { struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; struct device *dev = &mhi_cntrl->mhi_dev->dev; - size_t tr_len, read_offset, write_offset; + size_t tr_len, read_offset; struct mhi_ep_buf_info buf_info = {}; u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ring_element *el; - bool tr_done = false; void *buf_addr; - u32 buf_left; int ret; - buf_left = len; - do { /* Don't process the transfer ring if the channel is not in RUNNING state */ if (mhi_chan->state != MHI_CH_STATE_RUNNING) { @@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, /* Check if there is data pending to be read from previous read operation */ if (mhi_chan->tre_bytes_left) { dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); - tr_len = min(buf_left, mhi_chan->tre_bytes_left); + tr_len = min(len, mhi_chan->tre_bytes_left); } else { mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); mhi_chan->tre_bytes_left = mhi_chan->tre_size; - tr_len = min(buf_left, mhi_chan->tre_size); + tr_len = min(len, mhi_chan->tre_size); } read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; - write_offset = len - buf_left; buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL); if (!buf_addr) return -ENOMEM; buf_info.host_addr = mhi_chan->tre_loc + read_offset; - buf_info.dev_addr = buf_addr + write_offset; + buf_info.dev_addr = buf_addr; buf_info.size = tr_len; buf_info.cb = mhi_ep_read_completion; buf_info.cb_buf = buf_addr; @@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_buf_addr; } - buf_left -= tr_len; mhi_chan->tre_bytes_left -= tr_len; - if (!mhi_chan->tre_bytes_left) { - if (MHI_TRE_DATA_GET_IEOT(el)) - tr_done = true; - + if (!mhi_chan->tre_bytes_left) mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; - } - } while (buf_left && !tr_done); + /* Read until the some buffer is left or the ring becomes not empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); return 0; @@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring) mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); } else { /* UL channel */ - do { - ret = mhi_ep_read_channel(mhi_cntrl, ring); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); - return ret; - } - - /* Read until the ring becomes empty */ - } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + ret = mhi_ep_read_channel(mhi_cntrl, ring); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + return ret; + } } 
return 0; diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index a9b1f8beee7b..223fc54c45b3 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -194,7 +194,6 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) { struct mhi_event *mhi_event = mhi_cntrl->mhi_event; - struct device *dev = &mhi_cntrl->mhi_dev->dev; unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; int i, ret; @@ -221,7 +220,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) continue; if (mhi_event->irq >= mhi_cntrl->nr_irqs) { - dev_err(dev, "irq %d not available for event ring\n", + dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n", mhi_event->irq); ret = -EINVAL; goto error_request; @@ -232,7 +231,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) irq_flags, "mhi", mhi_event); if (ret) { - dev_err(dev, "Error requesting irq:%d for ev:%d\n", + dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n", mhi_cntrl->irq[mhi_event->irq], i); goto error_request; } diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index b51d9e243f35..f0dde77f50b4 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -286,6 +286,7 @@ config HW_RANDOM_INGENIC_TRNG config HW_RANDOM_NOMADIK tristate "ST-Ericsson Nomadik Random Number Generator support" depends on ARCH_NOMADIK || COMPILE_TEST + depends on ARM_AMBA default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index 36c34252b4f6..3c514b4fbc8a 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev) if (IS_ERR(ks_sa_rng->regmap_cfg)) return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n"); + ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(ks_sa_rng->clk)) + return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n"); + pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) { diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index ecfcb50302f6..efda90dcf5b3 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c @@ -122,10 +122,10 @@ struct si_sm_data { unsigned long error0_timeout; }; -static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, - struct si_sm_io *io, enum kcs_states state) +static unsigned int init_kcs_data(struct si_sm_data *kcs, + struct si_sm_io *io) { - kcs->state = state; + kcs->state = KCS_IDLE; kcs->io = io; kcs->write_pos = 0; kcs->write_count = 0; @@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, return 2; } -static unsigned int init_kcs_data(struct si_sm_data *kcs, - struct si_sm_io *io) -{ - return init_kcs_data_with_state(kcs, io, KCS_IDLE); -} - static inline unsigned char read_status(struct si_sm_data *kcs) { return kcs->io->inputb(kcs->io, 1); @@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, if (size > MAX_KCS_WRITE_SIZE) return IPMI_REQ_LEN_EXCEEDED_ERR; - if (kcs->state != KCS_IDLE) { + if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) { dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state); return IPMI_NOT_IN_MY_STATE_ERR; } @@ -501,7 +495,7 @@ static enum si_sm_result 
kcs_event(struct si_sm_data *kcs, long time) } if (kcs->state == KCS_HOSED) { - init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0); + init_kcs_data(kcs, kcs->io); return SI_SM_HOSED; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 09405668ebb3..99fe01321971 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -39,7 +39,9 @@ #define IPMI_DRIVER_VERSION "39.2" -static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user); static int ipmi_init_msghandler(void); static void smi_recv_work(struct work_struct *t); static void handle_new_recv_msgs(struct ipmi_smi *intf); @@ -939,13 +941,11 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) * risk. At this moment, simply skip it in that case. */ ipmi_free_recv_msg(msg); - atomic_dec(&msg->user->nr_msgs); } else { int index; struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); if (user) { - atomic_dec(&user->nr_msgs); user->handler->ipmi_recv_hndl(msg, user->handler_data); release_ipmi_user(user, index); } else { @@ -1634,8 +1634,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val) spin_unlock_irqrestore(&intf->events_lock, flags); list_for_each_entry_safe(msg, msg2, &msgs, link) { - msg->user = user; - kref_get(&user->refcount); + ipmi_set_recv_msg_user(msg, user); deliver_local_response(intf, msg); } @@ -2309,22 +2308,18 @@ static int i_ipmi_request(struct ipmi_user *user, struct ipmi_recv_msg *recv_msg; int rv = 0; - if (user) { - if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { - /* Decrement will happen at the end of the routine. */ - rv = -EBUSY; - goto out; - } - } - - if (supplied_recv) + if (supplied_recv) { recv_msg = supplied_recv; - else { - recv_msg = ipmi_alloc_recv_msg(); - if (recv_msg == NULL) { - rv = -ENOMEM; - goto out; + recv_msg->user = user; + if (user) { + atomic_inc(&user->nr_msgs); + /* The put happens when the message is freed. */ + kref_get(&user->refcount); } + } else { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) + return PTR_ERR(recv_msg); } recv_msg->user_msg_data = user_msg_data; @@ -2335,8 +2330,7 @@ static int i_ipmi_request(struct ipmi_user *user, if (smi_msg == NULL) { if (!supplied_recv) ipmi_free_recv_msg(recv_msg); - rv = -ENOMEM; - goto out; + return -ENOMEM; } } @@ -2346,10 +2340,6 @@ static int i_ipmi_request(struct ipmi_user *user, goto out_err; } - recv_msg->user = user; - if (user) - /* The put happens when the message is freed. 
*/ - kref_get(&user->refcount); recv_msg->msgid = msgid; /* * Store the message to send in the receive message so timeout @@ -2378,8 +2368,10 @@ static int i_ipmi_request(struct ipmi_user *user, if (rv) { out_err: - ipmi_free_smi_msg(smi_msg); - ipmi_free_recv_msg(recv_msg); + if (!supplied_smi) + ipmi_free_smi_msg(smi_msg); + if (!supplied_recv) + ipmi_free_recv_msg(recv_msg); } else { dev_dbg(intf->si_dev, "Send: %*ph\n", smi_msg->data_size, smi_msg->data); @@ -2388,9 +2380,6 @@ out_err: } rcu_read_unlock(); -out: - if (rv && user) - atomic_dec(&user->nr_msgs); return rv; } @@ -3882,7 +3871,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_ipmb_addr *ipmb_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -3903,9 +3892,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -3940,47 +3928,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, rv = -1; } rcu_read_unlock(); - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; - ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; - ipmb_addr->slave_addr = msg->rsp[6]; - ipmb_addr->lun = msg->rsp[7] & 3; - ipmb_addr->channel = msg->rsp[3] & 0xf; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; + ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb_addr->slave_addr = msg->rsp[6]; + ipmb_addr->lun = msg->rsp[7] & 3; + ipmb_addr->channel = msg->rsp[3] & 0xf; - /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[7] >> 2; - recv_msg->msg.netfn = msg->rsp[4] >> 2; - recv_msg->msg.cmd = msg->rsp[8]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[7] >> 2; + recv_msg->msg.netfn = msg->rsp[4] >> 2; + recv_msg->msg.cmd = msg->rsp[8]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * We chop off 10, not 9 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 10; - memcpy(recv_msg->msg_data, &msg->rsp[9], - msg->rsp_size - 10); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 10, not 9 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 10; + memcpy(recv_msg->msg_data, &msg->rsp[9], + msg->rsp_size - 10); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. 
+ */ + rv = 1; } return rv; @@ -3993,7 +3975,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, int rv = 0; struct ipmi_user *user = NULL; struct ipmi_ipmb_direct_addr *daddr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; unsigned char netfn = msg->rsp[0] >> 2; unsigned char cmd = msg->rsp[3]; @@ -4002,9 +3984,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4031,44 +4012,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, rv = -1; } rcu_read_unlock(); - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; - daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; - daddr->channel = 0; - daddr->slave_addr = msg->rsp[1]; - daddr->rs_lun = msg->rsp[0] & 3; - daddr->rq_lun = msg->rsp[2] & 3; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; + daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; + daddr->channel = 0; + daddr->slave_addr = msg->rsp[1]; + daddr->rs_lun = msg->rsp[0] & 3; + daddr->rq_lun = msg->rsp[2] & 3; - /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = (msg->rsp[2] >> 2); - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[3]; - recv_msg->msg.data = recv_msg->msg_data; - - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, msg->rsp + 4, - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = (msg->rsp[2] >> 2); + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[3]; + recv_msg->msg.data = recv_msg->msg_data; + + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, msg->rsp + 4, + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4182,7 +4157,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_lan_addr *lan_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; if (msg->rsp_size < 12) { /* Message not big enough, just ignore it. */ @@ -4203,9 +4178,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4217,49 +4191,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, * them to be freed. 
*/ rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; - lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; - lan_addr->session_handle = msg->rsp[4]; - lan_addr->remote_SWID = msg->rsp[8]; - lan_addr->local_SWID = msg->rsp[5]; - lan_addr->lun = msg->rsp[9] & 3; - lan_addr->channel = msg->rsp[3] & 0xf; - lan_addr->privilege = msg->rsp[3] >> 4; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; + lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; + lan_addr->session_handle = msg->rsp[4]; + lan_addr->remote_SWID = msg->rsp[8]; + lan_addr->local_SWID = msg->rsp[5]; + lan_addr->lun = msg->rsp[9] & 3; + lan_addr->channel = msg->rsp[3] & 0xf; + lan_addr->privilege = msg->rsp[3] >> 4; - /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[9] >> 2; - recv_msg->msg.netfn = msg->rsp[6] >> 2; - recv_msg->msg.cmd = msg->rsp[10]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[9] >> 2; + recv_msg->msg.netfn = msg->rsp[6] >> 2; + recv_msg->msg.cmd = msg->rsp[10]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * We chop off 12, not 11 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 12; - memcpy(recv_msg->msg_data, &msg->rsp[11], - msg->rsp_size - 12); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 12, not 11 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 12; + memcpy(recv_msg->msg_data, &msg->rsp[11], + msg->rsp_size - 12); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4281,7 +4250,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_system_interface_addr *smi_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; /* * We expect the OEM SW to perform error checking @@ -4310,9 +4279,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4325,48 +4293,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, */ rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* - * OEM Messages are expected to be delivered via - * the system interface to SMS software. 
We might - * need to visit this again depending on OEM - * requirements - */ - smi_addr = ((struct ipmi_system_interface_addr *) - &recv_msg->addr); - smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; - smi_addr->channel = IPMI_BMC_CHANNEL; - smi_addr->lun = msg->rsp[0] & 3; - - recv_msg->user = user; - recv_msg->user_msg_data = NULL; - recv_msg->recv_type = IPMI_OEM_RECV_TYPE; - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[1]; - recv_msg->msg.data = recv_msg->msg_data; + } else if (!IS_ERR(recv_msg)) { + /* + * OEM Messages are expected to be delivered via + * the system interface to SMS software. We might + * need to visit this again depending on OEM + * requirements + */ + smi_addr = ((struct ipmi_system_interface_addr *) + &recv_msg->addr); + smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr->channel = IPMI_BMC_CHANNEL; + smi_addr->lun = msg->rsp[0] & 3; + + recv_msg->user_msg_data = NULL; + recv_msg->recv_type = IPMI_OEM_RECV_TYPE; + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[1]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * The message starts at byte 4 which follows the - * Channel Byte in the "GET MESSAGE" command - */ - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, &msg->rsp[4], - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * The message starts at byte 4 which follows the + * Channel Byte in the "GET MESSAGE" command + */ + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, &msg->rsp[4], + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4425,8 +4387,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, if (!user->gets_events) continue; - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) { rcu_read_unlock(); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { @@ -4445,8 +4407,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, deliver_count++; copy_event_into_recv_msg(recv_msg, msg); - recv_msg->user = user; - kref_get(&user->refcount); list_add_tail(&recv_msg->link, &msgs); } srcu_read_unlock(&intf->users_srcu, index); @@ -4462,8 +4422,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, * No one to receive the message, put it in queue if there's * not already too many things in the queue. 
*/ - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(NULL); + if (IS_ERR(recv_msg)) { /* * We couldn't allocate memory for the * message, so requeue it for handling @@ -5155,27 +5115,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg) kfree(msg); } -static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user) { struct ipmi_recv_msg *rv; + if (user) { + if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { + atomic_dec(&user->nr_msgs); + return ERR_PTR(-EBUSY); + } + } + rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); - if (rv) { - rv->user = NULL; - rv->done = free_recv_msg; - atomic_inc(&recv_msg_inuse_count); + if (!rv) { + if (user) + atomic_dec(&user->nr_msgs); + return ERR_PTR(-ENOMEM); } + + rv->user = user; + rv->done = free_recv_msg; + if (user) + kref_get(&user->refcount); + atomic_inc(&recv_msg_inuse_count); return rv; } void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { - if (msg->user && !oops_in_progress) + if (msg->user && !oops_in_progress) { + atomic_dec(&msg->user->nr_msgs); kref_put(&msg->user->refcount, free_user); + } msg->done(msg); } EXPORT_SYMBOL(ipmi_free_recv_msg); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user) +{ + WARN_ON_ONCE(msg->user); /* User should not be set. */ + msg->user = user; + atomic_inc(&user->nr_msgs); + kref_get(&user->refcount); +} + static atomic_t panic_done_count = ATOMIC_INIT(0); static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index cf0be8a7939d..db41301e63f2 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -29,7 +29,7 @@ if TCG_TPM config TCG_TPM2_HMAC bool "Use HMAC and encrypted transactions on the TPM bus" - default X86_64 + default n select CRYPTO_ECDH select CRYPTO_LIB_AESCFB select CRYPTO_LIB_SHA256 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index ed0d3d8449b3..59e992dc65c4 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -977,8 +977,8 @@ restore_irqs: * will call disable_irq which undoes all of the above. */ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { - tpm_tis_write8(priv, original_int_vec, - TPM_INT_VECTOR(priv->locality)); + tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), + original_int_vec); rc = -1; } diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index c173a44c800a..629f050a855a 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -279,8 +279,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw, long best_diff = LONG_MIN; u32 shift; - if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) - return parent_rate; + if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) { + req->rate = parent_rate; + + return 0; + } /* Fist step: check the available dividers. 
*/ for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c index bb648a88e43a..ad47fdb23460 100644 --- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c +++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c @@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = { GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28), GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29), /* INFRA_AO1 */ - GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0), + GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0), GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1), GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2), GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4), diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c index 60990296450b..9a12e58230be 100644 --- a/drivers/clk/mediatek/clk-mux.c +++ b/drivers/clk/mediatek/clk-mux.c @@ -146,9 +146,7 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index) static int mtk_clk_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { - struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); - - return clk_mux_determine_rate_flags(hw, req, mux->data->flags); + return clk_mux_determine_rate_flags(hw, req, 0); } const struct clk_ops mtk_mux_clr_set_upd_ops = { diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c index 81efa885069b..b9e204d63a97 100644 --- a/drivers/clk/nxp/clk-lpc18xx-cgu.c +++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c @@ -370,23 +370,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw, return 0; } -static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int lpc18xx_pll0_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { unsigned long m; - if (*prate < rate) { + if (req->best_parent_rate < req->rate) { pr_warn("%s: pll dividers not supported\n", __func__); return -EINVAL; } - m = DIV_ROUND_UP_ULL(*prate, rate * 2); - if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) { - pr_warn("%s: unable to support rate %lu\n", __func__, rate); + m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2); + if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) { + pr_warn("%s: unable to support rate %lu\n", __func__, req->rate); return -EINVAL; } - return 2 * *prate * m; + req->rate = 2 * req->best_parent_rate * m; + + return 0; } static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, @@ -402,7 +404,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, } m = DIV_ROUND_UP_ULL(parent_rate, rate * 2); - if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) { + if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) { pr_warn("%s: unable to support rate %lu\n", __func__, rate); return -EINVAL; } @@ -443,7 +445,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops lpc18xx_pll0_ops = { .recalc_rate = lpc18xx_pll0_recalc_rate, - .round_rate = lpc18xx_pll0_round_rate, + .determine_rate = lpc18xx_pll0_determine_rate, .set_rate = lpc18xx_pll0_set_rate, }; diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index 33cc1f73c69d..cab2dbb7a8d4 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -273,8 +273,8 @@ static int qcom_cc_icc_register(struct device *dev, icd[i].slave_id = desc->icc_hws[i].slave_id; hws 
= &desc->clks[desc->icc_hws[i].clk_id]->hw; icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc"); - if (!icd[i].clk) - return dev_err_probe(dev, -ENOENT, + if (IS_ERR(icd[i].clk)) + return dev_err_probe(dev, PTR_ERR(icd[i].clk), "(%d) clock entry is null\n", i); icd[i].name = clk_hw_get_name(hws); } diff --git a/drivers/clk/qcom/tcsrcc-x1e80100.c b/drivers/clk/qcom/tcsrcc-x1e80100.c index ff61769a0807..a367e1f55622 100644 --- a/drivers/clk/qcom/tcsrcc-x1e80100.c +++ b/drivers/clk/qcom/tcsrcc-x1e80100.c @@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref_en = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "tcsr_edp_clkref_en", + .parent_data = &(const struct clk_parent_data){ + .index = DT_BI_TCXO_PAD, + }, + .num_parents = 1, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 0f27c33192e1..112ed81f648e 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -1012,6 +1012,7 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv, of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) { int idx; + unsigned int *new_ids; if (it.node != priv->np) continue; @@ -1022,11 +1023,13 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv, if (args[0] != CPG_MOD) continue; - ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL); - if (!ids) { + new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL); + if (!new_ids) { of_node_put(it.node); + kfree(ids); return -ENOMEM; } + ids = new_ids; if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) idx = MOD_CLK_PACK_10(args[1]); /* for DEF_MOD_STB() */ diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index 7bfba0afd778..4ec408c3a26a 100644 --- a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -635,7 +635,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp, bpmp->num_clocks = count; - bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL); + bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL); if (!bpmp->clocks) return -ENOMEM; diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c index e95fdc49c226..bbceb0289d45 100644 --- a/drivers/clocksource/clps711x-timer.c +++ b/drivers/clocksource/clps711x-timer.c @@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np) unsigned int irq = irq_of_parse_and_map(np, 0); struct clk *clock = of_clk_get(np, 0); void __iomem *base = of_iomap(np, 0); + int ret = 0; if (!base) return -ENOMEM; - if (!irq) - return -EINVAL; - if (IS_ERR(clock)) - return PTR_ERR(clock); + if (!irq) { + ret = -EINVAL; + goto unmap_io; + } + if (IS_ERR(clock)) { + ret = PTR_ERR(clock); + goto unmap_io; + } switch (of_alias_get_id(np, "timer")) { case CLPS711X_CLKSRC_CLOCKSOURCE: clps711x_clksrc_init(clock, base); break; case CLPS711X_CLKSRC_CLOCKEVENT: - return _clps711x_clkevt_init(clock, base, irq); + ret = _clps711x_clkevt_init(clock, base, irq); + break; default: - return -EINVAL; + ret = -EINVAL; + break; } - return 0; +unmap_io: + iounmap(base); + return ret; } TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 983443396f8f..30e1b64d0558 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -110,7 +110,7 @@ static int cpufreq_init(struct 
cpufreq_policy *policy) transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); if (!transition_latency) - transition_latency = CPUFREQ_ETERNAL; + transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; cpumask_copy(policy->cpus, priv->cpus); policy->driver_data = priv; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index c20d3ecc5a81..33c1df7f683e 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -443,7 +443,7 @@ soc_opp_out: } if (of_property_read_u32(np, "clock-latency", &transition_latency)) - transition_latency = CPUFREQ_ETERNAL; + transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; /* * Calculate the ramp time for max voltage change in the diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e90871092038..d0f4f7c2ae4d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1582,10 +1582,10 @@ static void update_qos_request(enum freq_qos_req_type type) continue; req = policy->driver_data; - cpufreq_cpu_put(policy); - - if (!req) + if (!req) { + cpufreq_cpu_put(policy); continue; + } if (hwp_active) intel_pstate_get_hwp_cap(cpu); @@ -1601,6 +1601,8 @@ static void update_qos_request(enum freq_qos_req_type type) if (freq_qos_update_request(req, freq) < 0) pr_warn("Failed to update freq constraint: CPU%d\n", i); + + cpufreq_cpu_put(policy); } } diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index aeb5e6304542..1f1ec43aad7c 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -238,7 +238,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000; if (!latency) - latency = CPUFREQ_ETERNAL; + latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; policy->cpuinfo.transition_latency = latency; policy->fast_switch_possible = true; diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index beb660ca240c..bb265541671a 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -15,6 +15,7 @@ #include <linux/energy_model.h> #include <linux/export.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/scmi_protocol.h> @@ -279,7 +280,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) latency = perf_ops->transition_latency_get(ph, domain); if (!latency) - latency = CPUFREQ_ETERNAL; + latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; policy->cpuinfo.transition_latency = latency; @@ -398,6 +399,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev) return true; } + /* + * Older Broadcom STB chips had a "clocks" property for CPU node(s) + * that did not match the SCMI performance protocol node, if we got + * there, it means we had such an older Device Tree, therefore return + * true to preserve backwards compatibility. 
+ */ + if (of_machine_is_compatible("brcm,brcmstb")) + return true; + return false; } diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index a191d9bdf667..bdbc3923629e 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -157,7 +157,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) latency = scpi_ops->get_transition_latency(cpu_dev); if (!latency) - latency = CPUFREQ_ETERNAL; + latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; policy->cpuinfo.transition_latency = latency; diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index d8ab5b01d46d..c032dd5d12d3 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -183,7 +183,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev) if (of_property_read_u32(np, "clock-latency", &spear_cpufreq.transition_latency)) - spear_cpufreq.transition_latency = CPUFREQ_ETERNAL; + spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; cnt = of_property_count_u32_elems(np, "cpufreq_tbl"); if (cnt <= 0) { diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 7b8fcfa55038..39186008afbf 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -86,10 +86,14 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy, { struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); struct cpufreq_frequency_table *tbl = policy->freq_table + index; - unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset; + unsigned int edvd_offset; u32 edvd_val = tbl->driver_data; + u32 cpu; - writel(edvd_val, data->regs + edvd_offset); + for_each_cpu(cpu, policy->cpus) { + edvd_offset = data->cpus[cpu].edvd_offset; + writel(edvd_val, data->regs + edvd_offset); + } return 0; } diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c index 1fc9968eae19..b6b06a510fd8 100644 --- a/drivers/cpuidle/cpuidle-qcom-spm.c +++ b/drivers/cpuidle/cpuidle-qcom-spm.c @@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu) return -ENODEV; saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); + of_node_put(cpu_node); if (!saw_node) return -ENODEV; pdev = of_find_device_by_node(saw_node); of_node_put(saw_node); - of_node_put(cpu_node); if (!pdev) return -ENODEV; data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL); - if (!data) + if (!data) { + put_device(&pdev->dev); return -ENOMEM; + } data->spm = dev_get_drvdata(&pdev->dev); + put_device(&pdev->dev); if (!data->spm) return -EINVAL; diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c index a72dfebc53ff..fa201dae1f81 100644 --- a/drivers/crypto/aspeed/aspeed-hace-crypto.c +++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c @@ -346,7 +346,7 @@ free_req: } else { dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents, - DMA_TO_DEVICE); + DMA_FROM_DEVICE); dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); } diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index dcc2380a5889..d15b2e943447 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd) if (err && (dd->flags & TDES_FLAGS_FAST)) { dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); } 
return err; diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 45e130b901eb..17eb236e9ee4 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm, dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); ret = PTR_ERR(qm->debug.acc_diff_regs); qm->debug.acc_diff_regs = NULL; + qm->debug.qm_diff_regs = NULL; return ret; } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 34d30b783813..a11fe5e27677 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -690,6 +690,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) /* Config data buffer pasid needed by Kunpeng 920 */ hpre_config_pasid(qm); + hpre_open_sva_prefetch(qm); hpre_enable_clock_gate(qm); @@ -1366,8 +1367,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) if (ret) return ret; - hpre_open_sva_prefetch(qm); - hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index b18692ee7fd5..a9550a05dfbd 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3657,6 +3657,10 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, } pdev = container_of(dev, struct pci_dev, dev); + if (pci_physfn(pdev) != qm->pdev) { + pci_err(qm->pdev, "the pdev input does not match the pf!\n"); + return -EINVAL; + } *fun_index = pdev->devfn; @@ -4272,9 +4276,6 @@ static void qm_restart_prepare(struct hisi_qm *qm) { u32 value; - if (qm->err_ini->open_sva_prefetch) - qm->err_ini->open_sva_prefetch(qm); - if (qm->ver >= QM_HW_V3) return; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 75c25f0d5f2b..9014cc36705f 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -441,6 +441,45 @@ static void sec_set_endian(struct hisi_qm *qm) writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); } +static void sec_close_sva_prefetch(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) + return; + + val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); + val |= SEC_PREFETCH_DISABLE; + writel(val, qm->io_base + SEC_PREFETCH_CFG); + + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, + val, !(val & SEC_SVA_DISABLE_READY), + SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); + if (ret) + pci_err(qm->pdev, "failed to close sva prefetch\n"); +} + +static void sec_open_sva_prefetch(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) + return; + + /* Enable prefetch */ + val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); + val &= SEC_PREFETCH_ENABLE; + writel(val, qm->io_base + SEC_PREFETCH_CFG); + + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, + val, !(val & SEC_PREFETCH_DISABLE), + SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); + if (ret) + pci_err(qm->pdev, "failed to open sva prefetch\n"); +} + static void sec_engine_sva_config(struct hisi_qm *qm) { u32 reg; @@ -474,45 +513,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm) writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); } -} - -static void sec_open_sva_prefetch(struct hisi_qm *qm) -{ - u32 val; - int ret; - - if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) - return; - - /* Enable prefetch */ 
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); - val &= SEC_PREFETCH_ENABLE; - writel(val, qm->io_base + SEC_PREFETCH_CFG); - - ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, - val, !(val & SEC_PREFETCH_DISABLE), - SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to open sva prefetch\n"); -} - -static void sec_close_sva_prefetch(struct hisi_qm *qm) -{ - u32 val; - int ret; - - if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) - return; - - val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); - val |= SEC_PREFETCH_DISABLE; - writel(val, qm->io_base + SEC_PREFETCH_CFG); - - ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, - val, !(val & SEC_SVA_DISABLE_READY), - SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to close sva prefetch\n"); + sec_open_sva_prefetch(qm); } static void sec_enable_clock_gate(struct hisi_qm *qm) @@ -1094,7 +1095,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) if (ret) return ret; - sec_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); ret = sec_show_last_regs_init(qm); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 80c2fcb1d26d..323f53217f0c 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -449,10 +449,9 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) return false; } -static int hisi_zip_set_high_perf(struct hisi_qm *qm) +static void hisi_zip_set_high_perf(struct hisi_qm *qm) { u32 val; - int ret; val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET); if (perf_mode == HZIP_HIGH_COMP_PERF) @@ -462,13 +461,6 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm) /* Set perf mode */ writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET); - ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET, - val, val == perf_mode, HZIP_DELAY_1_US, - HZIP_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to set perf mode\n"); - - return ret; } static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) @@ -565,6 +557,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63); writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); } + hisi_zip_open_sva_prefetch(qm); /* let's open all compression/decompression cores */ dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val; @@ -576,6 +569,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); + hisi_zip_set_high_perf(qm); hisi_zip_enable_clock_gate(qm); return 0; @@ -1171,11 +1165,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) if (ret) return ret; - ret = hisi_zip_set_high_perf(qm); - if (ret) - return ret; - - hisi_zip_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c index fdeca861933c..903f31dc663e 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c @@ -232,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req) struct device *dev = rctx->hcu_dev->dev; unsigned int remainder = 0; unsigned int total; - size_t nents; + int nents; size_t count; int rc; int i; @@ -253,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct 
ahash_request *req) /* Determine the number of scatter gather list entries to process. */ nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder); + if (nents < 0) + return nents; + /* If there are entries to process, map them. */ if (nents) { rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents, diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 1493a373baf7..cf68b213c3e6 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1614,7 +1614,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, return -EINVAL; } err_msg = "Invalid engine group format"; - strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1); + strscpy(tmp_buf, ctx->val.vstr); start = tmp_buf; has_se = has_ie = has_ae = false; diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 69d6019d8abc..ba26b4db7a60 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -252,7 +252,7 @@ static void rk_hash_unprepare(struct crypto_engine *engine, void *breq) struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); struct rk_crypto_info *rkc = rctx->dev; - dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); + dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); } static int rk_hash_run(struct crypto_engine *engine, void *breq) diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index e2a1e4463b6f..712df781436c 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -116,6 +116,7 @@ struct rockchip_dfi { int buswidth[DMC_MAX_CHANNELS]; int ddrmon_stride; bool ddrmon_ctrl_single; + unsigned int count_multiplier; /* number of data clocks per count */ }; static int rockchip_dfi_enable(struct rockchip_dfi *dfi) @@ -435,7 +436,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event) switch (event->attr.config) { case PERF_EVENT_CYCLES: - count = total.c[0].clock_cycles; + count = total.c[0].clock_cycles * dfi->count_multiplier; break; case PERF_EVENT_READ_BYTES: for (i = 0; i < dfi->max_channels; i++) @@ -656,6 +657,9 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi) break; } + if (!dfi->count_multiplier) + dfi->count_multiplier = 1; + ret = perf_pmu_register(pmu, "rockchip_ddr", -1); if (ret) return ret; @@ -752,6 +756,7 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi) dfi->max_channels = 4; dfi->ddrmon_stride = 0x4000; + dfi->count_multiplier = 2; return 0; }; diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c index 7ad5225b0381..6e608d92f7e6 100644 --- a/drivers/devfreq/mtk-cci-devfreq.c +++ b/drivers/devfreq/mtk-cci-devfreq.c @@ -386,7 +386,8 @@ out_disable_cci_clk: out_free_resources: if (regulator_is_enabled(drv->proc_reg)) regulator_disable(drv->proc_reg); - if (drv->sram_reg && regulator_is_enabled(drv->sram_reg)) + if (!IS_ERR_OR_NULL(drv->sram_reg) && + regulator_is_enabled(drv->sram_reg)) regulator_disable(drv->sram_reg); return ret; diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index ac4b3d95531c..d8cd12d906a7 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -967,6 +967,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan) return !!GET_BITFIELD(mcmtr, 2, 2); } +static bool i10nm_channel_disabled(struct skx_imc 
*imc, int chan) +{ + u32 mcmtr = I10NM_GET_MCMTR(imc, chan); + + edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr); + + return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18)); +} + static int i10nm_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg) { @@ -980,6 +989,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci, if (!imc->mbase) continue; + if (i10nm_channel_disabled(imc, i)) { + edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i); + continue; + } + ndimms = 0; if (res_cfg->type != GNR) diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c index d349766bc0b2..f78b87f33403 100644 --- a/drivers/firmware/arm_scmi/transports/virtio.c +++ b/drivers/firmware/arm_scmi/transports/virtio.c @@ -870,6 +870,9 @@ static int scmi_vio_probe(struct virtio_device *vdev) /* Ensure initialized scmi_vdev is visible */ smp_store_mb(scmi_vdev, vdev); + /* Set device ready */ + virtio_device_ready(vdev); + ret = platform_driver_register(&scmi_virtio_driver); if (ret) { vdev->priv = NULL; diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig index f2fdd3756648..179f5d46d8dd 100644 --- a/drivers/firmware/meson/Kconfig +++ b/drivers/firmware/meson/Kconfig @@ -5,7 +5,7 @@ config MESON_SM tristate "Amlogic Secure Monitor driver" depends on ARCH_MESON || COMPILE_TEST - default y + default ARCH_MESON depends on ARM64_4K_PAGES help Say y here to enable the Amlogic secure monitor driver diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index f25a9746249b..3ab67aaa9e5d 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -232,11 +232,16 @@ EXPORT_SYMBOL(meson_sm_call_write); struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node) { struct platform_device *pdev = of_find_device_by_node(sm_node); + struct meson_sm_firmware *fw; if (!pdev) return NULL; - return platform_get_drvdata(pdev); + fw = platform_get_drvdata(pdev); + + put_device(&pdev->dev); + + return fw; } EXPORT_SYMBOL_GPL(meson_sm_get); diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c index cfa7b0a50c8e..03b16b8f639a 100644 --- a/drivers/gpio/gpio-wcd934x.c +++ b/drivers/gpio/gpio-wcd934x.c @@ -102,7 +102,7 @@ static int wcd_gpio_probe(struct platform_device *pdev) chip->base = -1; chip->ngpio = WCD934X_NPINS; chip->label = dev_name(dev); - chip->can_sleep = false; + chip->can_sleep = true; return devm_gpiochip_add_data(dev, chip, data); } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 805d6662c88b..21376d98ee49 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev, * * @handle: handle used to pass amdgpu_device pointer * - * Initialize the hardware, boot up the VCPU and do some testing + * Initialize the hardware, boot up the VCPU and do some testing. + * + * On SI, the UVD is meant to be used in a specific power state, + * or alternatively the driver can manually enable its clock. + * In amdgpu we use the dedicated UVD power state when DPM is enabled. + * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state + * for the SMU and afterwards enables the UVD clock. + * This is automatically done by amdgpu_uvd_ring_begin_use when work + * is submitted to the UVD ring. Here, we have to call it manually + * in order to power up UVD before firmware validation. 
+ * + * Note that we must not disable the UVD clock here, as that would + * cause the ring test to fail. However, UVD is powered off + * automatically after the ring test: amdgpu_uvd_ring_end_use calls + * the UVD idle work handler which will disable the UVD clock when + * all fences are signalled. */ static int uvd_v3_1_hw_init(void *handle) { @@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(void *handle) int r; uvd_v3_1_mc_resume(adev); + uvd_v3_1_enable_mgcg(adev, true); + + /* Make sure UVD is powered during FW validation. + * It's going to be automatically powered off after the ring test. + */ + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + else + amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); r = uvd_v3_1_fw_validate(adev); if (r) { @@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(void *handle) return r; } - uvd_v3_1_enable_mgcg(adev, true); - amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - uvd_v3_1_start(adev); r = amdgpu_ring_test_helper(ring); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 3e9e0f36cd3f..a8d4b3a3e77a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -4231,7 +4231,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start, r = svm_range_get_attr(p, mm, start, size, nattrs, attrs); break; default: - r = EINVAL; + r = -EINVAL; break; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b02ff92bae0b..ffa0d7483ffc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2027,6 +2027,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.disable_ips_in_vpb = 0; + /* DCN35 and above supports dynamic DTBCLK switch */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0)) + init_data.flags.allow_0_dtb_clk = true; + /* Enable DWB for tested platforms only */ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) init_data.num_virtual_links = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 2b1673d69ea8..1ab5ae9b5ea5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration( REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0); if (data->taps.h_taps + data->taps.v_taps <= 2) { - /* Set bypass */ - - /* DCE6 has no SCL_MODE register, skip scale mode programming */ + /* Disable scaler functionality */ + REG_WRITE(SCL_SCALER_ENABLE, 0); + /* Clear registers that can cause glitches even when the scaler is off */ + REG_WRITE(SCL_TAP_CONTROL, 0); + REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0); + REG_WRITE(SCL_F_SHARP_CONTROL, 0); return false; } @@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration( SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1, SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1); - /* DCE6 has no SCL_MODE register, skip scale mode programming */ + REG_WRITE(SCL_SCALER_ENABLE, 1); /* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */ @@ -502,6 +505,8 @@ static void dce60_transform_set_scaler( REG_SET(DC_LB_MEM_SIZE, 0, DC_LB_MEM_SIZE, xfm_dce->lb_memory_size); + REG_WRITE(SCL_UPDATE, 0x00010000); + /* Clear SCL_F_SHARP_CONTROL value to 0 */ REG_WRITE(SCL_F_SHARP_CONTROL, 0); @@ -527,8 +532,7 @@ static void dce60_transform_set_scaler( if 
(coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) { /* 4. Program vertical filters */ if (xfm_dce->filter_v == NULL) - REG_SET(SCL_VERT_FILTER_CONTROL, 0, - SCL_V_2TAP_HARDCODE_COEF_EN, 0); + REG_WRITE(SCL_VERT_FILTER_CONTROL, 0); program_multi_taps_filter( xfm_dce, data->taps.v_taps, @@ -542,8 +546,7 @@ static void dce60_transform_set_scaler( /* 5. Program horizontal filters */ if (xfm_dce->filter_h == NULL) - REG_SET(SCL_HORZ_FILTER_CONTROL, 0, - SCL_H_2TAP_HARDCODE_COEF_EN, 0); + REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0); program_multi_taps_filter( xfm_dce, data->taps.h_taps, @@ -566,6 +569,8 @@ static void dce60_transform_set_scaler( /* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */ /* DCE6 DATA_FORMAT register does not support ALPHA_EN */ + + REG_WRITE(SCL_UPDATE, 0); } #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h index cbce194ec7b8..eb716e8337e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h @@ -155,6 +155,9 @@ SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \ SRI(VIEWPORT_START, SCL, id), \ SRI(VIEWPORT_SIZE, SCL, id), \ + SRI(SCL_SCALER_ENABLE, SCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \ SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \ SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \ SRI(SCL_VERT_FILTER_INIT, SCL, id), \ @@ -590,6 +593,7 @@ struct dce_transform_registers { uint32_t SCL_VERT_FILTER_SCALE_RATIO; uint32_t SCL_HORZ_FILTER_INIT; #if defined(CONFIG_DRM_AMD_DC_SI) + uint32_t SCL_SCALER_ENABLE; uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA; uint32_t SCL_HORZ_FILTER_INIT_CHROMA; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c index 9ba6cb67655f..6c75aa82327a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c @@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs, if (dual_plane) { unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); - ; if (src->sw_mode == dm_sw_linear) ASSERT(p1_pte_row_height_linear >= 8); diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h index 9de01ae574c0..067eddd9c62d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h @@ -4115,6 +4115,7 @@ #define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55 #define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40 #define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL0_SCL_SCALER_ENABLE 0x1B42 #define mmSCL0_SCL_CONTROL 0x1B44 #define mmSCL0_SCL_DEBUG 0x1B6A #define mmSCL0_SCL_DEBUG2 0x1B69 @@ -4144,6 +4145,7 @@ #define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55 #define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40 #define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41 +#define mmSCL1_SCL_SCALER_ENABLE 0x1E42 #define mmSCL1_SCL_CONTROL 0x1E44 #define mmSCL1_SCL_DEBUG 0x1E6A #define mmSCL1_SCL_DEBUG2 0x1E69 @@ -4173,6 +4175,7 @@ #define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155 #define mmSCL2_SCL_COEF_RAM_SELECT 0x4140 #define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141 +#define mmSCL2_SCL_SCALER_ENABLE 0x4142 #define mmSCL2_SCL_CONTROL 0x4144 #define mmSCL2_SCL_DEBUG 0x416A #define mmSCL2_SCL_DEBUG2 
0x4169 @@ -4202,6 +4205,7 @@ #define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455 #define mmSCL3_SCL_COEF_RAM_SELECT 0x4440 #define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441 +#define mmSCL3_SCL_SCALER_ENABLE 0x4442 #define mmSCL3_SCL_CONTROL 0x4444 #define mmSCL3_SCL_DEBUG 0x446A #define mmSCL3_SCL_DEBUG2 0x4469 @@ -4231,6 +4235,7 @@ #define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755 #define mmSCL4_SCL_COEF_RAM_SELECT 0x4740 #define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741 +#define mmSCL4_SCL_SCALER_ENABLE 0x4742 #define mmSCL4_SCL_CONTROL 0x4744 #define mmSCL4_SCL_DEBUG 0x476A #define mmSCL4_SCL_DEBUG2 0x4769 @@ -4260,6 +4265,7 @@ #define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55 #define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40 #define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41 +#define mmSCL5_SCL_SCALER_ENABLE 0x4A42 #define mmSCL5_SCL_CONTROL 0x4A44 #define mmSCL5_SCL_DEBUG 0x4A6A #define mmSCL5_SCL_DEBUG2 0x4A69 @@ -4287,6 +4293,7 @@ #define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55 #define mmSCL_COEF_RAM_SELECT 0x1B40 #define mmSCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL_SCALER_ENABLE 0x1B42 #define mmSCL_CONTROL 0x1B44 #define mmSCL_DEBUG 0x1B6A #define mmSCL_DEBUG2 0x1B69 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h index bd8085ec54ed..da5596fbfdcb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h @@ -8648,6 +8648,8 @@ #define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000 #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000 +#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L +#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000 #define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L #define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000 #define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c index 42efe838fa85..2d2d2d5e6763 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c @@ -66,6 +66,13 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) (amdgpu_crtc->v_border * 2)); vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; + + /* we have issues with mclk switching with + * refresh rates over 120 hz on the non-DC code. + */ + if (drm_mode_vrefresh(&amdgpu_crtc->hw_mode) > 120) + vblank_time_us = 0; + break; } } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index a5ad1b60597e..82167eca2668 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3066,7 +3066,13 @@ static bool si_dpm_vblank_too_short(void *handle) /* we never hit the non-gddr5 limit so disable it */ u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; - if (vblank_time < switch_limit) + /* Consider zero vblank time too short and disable MCLK switching. + * Note that the vblank time is set to maximum when no displays are attached, + * so we'll still enable MCLK switching in that case. 
+ */ + if (vblank_time == 0) + return true; + else if (vblank_time < switch_limit) return true; else return false; @@ -3424,12 +3430,14 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, { struct si_ps *ps = si_get_ps(rps); struct amdgpu_clock_and_voltage_limits *max_limits; + struct amdgpu_connector *conn; bool disable_mclk_switching = false; bool disable_sclk_switching = false; u32 mclk, sclk; u16 vddc, vddci, min_vce_voltage = 0; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; u32 max_sclk = 0, max_mclk = 0; + u32 high_pixelclock_count = 0; int i; if (adev->asic_type == CHIP_HAINAN) { @@ -3457,6 +3465,35 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, } } + /* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz. + * For example, 4K 60Hz and 1080p 144Hz fall into this category. + * Find number of such displays connected. + */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) || + !adev->mode_info.crtcs[i]->enabled) + continue; + + conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector); + + if (conn->pixelclock_for_modeset > 297000) + high_pixelclock_count++; + } + + /* These are some ad-hoc fixes to some issues observed with SI GPUs. + * They are necessary because we don't have something like dce_calcs + * for these GPUs to calculate bandwidth requirements. + */ + if (high_pixelclock_count) { + /* On Oland, we observe some flickering when two 4K 60Hz + * displays are connected, possibly because voltage is too low. + * Raise the voltage by requiring a higher SCLK. + * (Voltage cannot be adjusted independently without also SCLK.) + */ + if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND) + disable_sclk_switching = true; + } + if (rps->vce_active) { rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; @@ -5613,14 +5650,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev, static int si_disable_ulv(struct amdgpu_device *adev) { - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ulv_param *ulv = &si_pi->ulv; - - if (ulv->supported) - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? - 0 : -EINVAL; + PPSMC_Result r; - return 0; + r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV); + return (r == PPSMC_Result_OK) ? 0 : -EINVAL; } static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, @@ -5793,9 +5826,9 @@ static int si_upload_smc_data(struct amdgpu_device *adev) { struct amdgpu_crtc *amdgpu_crtc = NULL; int i; - - if (adev->pm.dpm.new_active_crtc_count == 0) - return 0; + u32 crtc_index = 0; + u32 mclk_change_block_cp_min = 0; + u32 mclk_change_block_cp_max = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) { if (adev->pm.dpm.new_active_crtcs & (1 << i)) { @@ -5804,26 +5837,31 @@ static int si_upload_smc_data(struct amdgpu_device *adev) } } - if (amdgpu_crtc == NULL) - return 0; + /* When a display is plugged in, program these so that the SMC + * performs MCLK switching when it doesn't cause flickering. + * When no display is plugged in, there is no need to restrict + * MCLK switching, so program them to zero. 
+ */ + if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) { + crtc_index = amdgpu_crtc->crtc_id; - if (amdgpu_crtc->line_time <= 0) - return 0; + if (amdgpu_crtc->line_time) { + mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time; + mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time; + } + } - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_crtc_index, - amdgpu_crtc->crtc_id) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_crtc_index, + crtc_index); - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, - amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, + mclk_change_block_cp_min); - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, - amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, + mclk_change_block_cp_max); return 0; } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3eb955333c80..66369b6a023f 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -101,6 +101,7 @@ config DRM_ITE_IT6505 select EXTCON select CRYPTO select CRYPTO_HASH + select REGMAP_I2C help ITE IT6505 DisplayPort bridge chip driver. diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 07035ab77b79..4597fdb65358 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -445,7 +445,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout( static int dpu_encoder_phys_wb_wait_for_commit_done( struct dpu_encoder_phys *phys_enc) { - unsigned long ret; + int ret; struct dpu_encoder_wait_info wait_info; struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2016c1e7242f..fc433f39c127 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -852,7 +852,7 @@ done: nvif_vmm_put(vmm, &old_mem->vma[1]); nvif_vmm_put(vmm, &old_mem->vma[0]); } - return 0; + return ret; } static int diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c index 5bbea734123b..ee04c55175bb 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c @@ -161,7 +161,7 @@ static int nt35560_set_brightness(struct backlight_device *bl) par = 0x00; ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, &par, 1); - if (ret) { + if (ret < 0) { dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret); return ret; } diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 03eb7d52209a..9360d486e52e 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1032,14 +1032,15 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, ret = group_priority_permit(file, args->priority); if (ret) - return ret; + goto out; ret = panthor_group_create(pfile, args, queue_args); - if (ret >= 0) { - args->group_handle = ret; - ret = 0; - } + if (ret < 0) + goto out; + args->group_handle = ret; + ret = 0; +out: kvfree(queue_args); return ret; } diff --git 
a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index ac77d1246b94..811265648a58 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -1408,7 +1408,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, unsigned block_align, unsigned height_align, unsigned base_align, unsigned *l0_size, unsigned *mipmap_size) { - unsigned offset, i, level; + unsigned offset, i; unsigned width, height, depth, size; unsigned blocksize; unsigned nbx, nby; @@ -1420,7 +1420,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, w0 = r600_mip_minify(w0, 0); h0 = r600_mip_minify(h0, 0); d0 = r600_mip_minify(d0, 0); - for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { + for (i = 0, offset = 0; i < nlevels; i++) { width = r600_mip_minify(w0, i); nbx = r600_fmt_get_nblocksx(format, width); diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c index 92f4261305bd..f2ae5d17ea60 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c @@ -576,7 +576,10 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, udelay(10); rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); - ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN; + rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK); + rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1); + + ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN; rcar_mipi_dsi_write(dsi, PPISETR, ppisetr); rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h index a6b276f1d6ee..a54c7eb4113b 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h @@ -12,6 +12,9 @@ #define LINKSR_LPBUSY (1 << 1) #define LINKSR_HSBUSY (1 << 0) +#define TXSETR 0x100 +#define TXSETR_LANECNT_MASK (0x3 << 0) + /* * Video Mode Register */ @@ -80,10 +83,7 @@ * PHY-Protocol Interface (PPI) Registers */ #define PPISETR 0x700 -#define PPISETR_DLEN_0 (0x1 << 0) -#define PPISETR_DLEN_1 (0x3 << 0) -#define PPISETR_DLEN_2 (0x7 << 0) -#define PPISETR_DLEN_3 (0xf << 0) +#define PPISETR_DLEN_MASK (0xf << 0) #define PPISETR_CLEN (1 << 8) #define PPICLCR 0x710 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index ea741bc4ac3f..8b72848bb25c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1515,6 +1515,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { struct vmw_bo *vmw_bo = NULL; + struct vmw_resource *res; struct vmw_surface *srf = NULL; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA); int ret; @@ -1550,18 +1551,24 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ? 
VMW_RES_DIRTY_SET : 0; - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - dirty, user_surface_converter, - &cmd->body.host.sid, NULL); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty, + user_surface_converter, &cmd->body.host.sid, + NULL); if (unlikely(ret != 0)) { if (unlikely(ret != -ERESTARTSYS)) VMW_DEBUG_USER("could not find surface for DMA.\n"); return ret; } - srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); + res = sw_context->res_cache[vmw_res_surface].res; + if (!res) { + VMW_DEBUG_USER("Invalid DMA surface.\n"); + return -EINVAL; + } - vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header); + srf = vmw_res_to_srf(res); + vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, + header); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index e7625b3f71e0..80e8bbc3021d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -309,8 +309,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx, hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key); } node->res = vmw_resource_reference_unless_doomed(res); - if (!node->res) + if (!node->res) { + hash_del_rcu(&node->hash.head); return -ESRCH; + } node->first_usage = 1; if (!res->dev_priv->has_mob) { @@ -637,7 +639,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx) hash_del_rcu(&val->hash.head); list_for_each_entry(val, &ctx->resource_ctx_list, head) - hash_del_rcu(&entry->hash.head); + hash_del_rcu(&val->hash.head); ctx->sw_context = NULL; } diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c index 82750520a90a..f14a3cc7d117 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_group.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c @@ -237,17 +237,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group err = q->ops->suspend_wait(q); if (err) - goto err_suspend; + return err; } if (need_resume) xe_hw_engine_group_resume_faulting_lr_jobs(group); return 0; - -err_suspend: - up_write(&group->mode_sem); - return err; } /** diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 6e7c940d7e22..71a5e852fbac 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -277,8 +277,7 @@ static int query_mem_regions(struct xe_device *xe, mem_regions->mem_regions[0].instance = 0; mem_regions->mem_regions[0].min_page_size = PAGE_SIZE; mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT; - if (perfmon_capable()) - mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man); + mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man); mem_regions->num_mem_regions = 1; for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) { @@ -294,13 +293,11 @@ static int query_mem_regions(struct xe_device *xe, mem_regions->mem_regions[mem_regions->num_mem_regions].total_size = man->size; - if (perfmon_capable()) { - xe_ttm_vram_get_used(man, - &mem_regions->mem_regions - [mem_regions->num_mem_regions].used, - &mem_regions->mem_regions - [mem_regions->num_mem_regions].cpu_visible_used); - } + xe_ttm_vram_get_used(man, + &mem_regions->mem_regions + [mem_regions->num_mem_regions].used, + &mem_regions->mem_regions + [mem_regions->num_mem_regions].cpu_visible_used); mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size = xe_ttm_vram_get_cpu_visible_size(man); diff --git 
a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index c887f48756f4..bbd6f23bce78 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -394,27 +394,15 @@ static int hidraw_revoke(struct hidraw_list *list) return 0; } -static long hidraw_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long hidraw_fixed_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, + void __user *arg) { - struct inode *inode = file_inode(file); - unsigned int minor = iminor(inode); - long ret = 0; - struct hidraw *dev; - struct hidraw_list *list = file->private_data; - void __user *user_arg = (void __user*) arg; - - down_read(&minors_rwsem); - dev = hidraw_table[minor]; - if (!dev || !dev->exist || hidraw_is_revoked(list)) { - ret = -ENODEV; - goto out; - } + struct hid_device *hid = dev->hid; switch (cmd) { case HIDIOCGRDESCSIZE: - if (put_user(dev->hid->rsize, (int __user *)arg)) - ret = -EFAULT; + if (put_user(hid->rsize, (int __user *)arg)) + return -EFAULT; break; case HIDIOCGRDESC: @@ -422,113 +410,145 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, __u32 len; if (get_user(len, (int __user *)arg)) - ret = -EFAULT; - else if (len > HID_MAX_DESCRIPTOR_SIZE - 1) - ret = -EINVAL; - else if (copy_to_user(user_arg + offsetof( - struct hidraw_report_descriptor, - value[0]), - dev->hid->rdesc, - min(dev->hid->rsize, len))) - ret = -EFAULT; + return -EFAULT; + + if (len > HID_MAX_DESCRIPTOR_SIZE - 1) + return -EINVAL; + + if (copy_to_user(arg + offsetof( + struct hidraw_report_descriptor, + value[0]), + hid->rdesc, + min(hid->rsize, len))) + return -EFAULT; + break; } case HIDIOCGRAWINFO: { struct hidraw_devinfo dinfo; - dinfo.bustype = dev->hid->bus; - dinfo.vendor = dev->hid->vendor; - dinfo.product = dev->hid->product; - if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) - ret = -EFAULT; + dinfo.bustype = hid->bus; + dinfo.vendor = hid->vendor; + dinfo.product = hid->product; + if (copy_to_user(arg, &dinfo, sizeof(dinfo))) + return -EFAULT; break; } case HIDIOCREVOKE: { - if (user_arg) - ret = -EINVAL; - else - ret = hidraw_revoke(list); - break; + struct hidraw_list *list = file->private_data; + + if (arg) + return -EINVAL; + + return hidraw_revoke(list); } default: - { - struct hid_device *hid = dev->hid; - if (_IOC_TYPE(cmd) != 'H') { - ret = -EINVAL; - break; - } + /* + * None of the above ioctls can return -EAGAIN, so + * use it as a marker that we need to check variable + * length ioctls. 
+ */ + return -EAGAIN; + } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); - break; - } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); - break; - } + return 0; +} - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSINPUT(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT); - break; - } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGINPUT(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT); - break; - } +static long hidraw_rw_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, + void __user *user_arg) +{ + int len = _IOC_SIZE(cmd); + + switch (cmd & ~IOCSIZE_MASK) { + case HIDIOCSFEATURE(0): + return hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); + case HIDIOCGFEATURE(0): + return hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); + case HIDIOCSINPUT(0): + return hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT); + case HIDIOCGINPUT(0): + return hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT); + case HIDIOCSOUTPUT(0): + return hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT); + case HIDIOCGOUTPUT(0): + return hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT); + } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSOUTPUT(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT); - break; - } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGOUTPUT(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT); - break; - } + return -EINVAL; +} - /* Begin Read-only ioctls. */ - if (_IOC_DIR(cmd) != _IOC_READ) { - ret = -EINVAL; - break; - } +static long hidraw_ro_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, + void __user *user_arg) +{ + struct hid_device *hid = dev->hid; + int len = _IOC_SIZE(cmd); + int field_len; + + switch (cmd & ~IOCSIZE_MASK) { + case HIDIOCGRAWNAME(0): + field_len = strlen(hid->name) + 1; + if (len > field_len) + len = field_len; + return copy_to_user(user_arg, hid->name, len) ? -EFAULT : len; + case HIDIOCGRAWPHYS(0): + field_len = strlen(hid->phys) + 1; + if (len > field_len) + len = field_len; + return copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len; + case HIDIOCGRAWUNIQ(0): + field_len = strlen(hid->uniq) + 1; + if (len > field_len) + len = field_len; + return copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len; + } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) { - int len = strlen(hid->name) + 1; - if (len > _IOC_SIZE(cmd)) - len = _IOC_SIZE(cmd); - ret = copy_to_user(user_arg, hid->name, len) ? - -EFAULT : len; - break; - } + return -EINVAL; +} - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) { - int len = strlen(hid->phys) + 1; - if (len > _IOC_SIZE(cmd)) - len = _IOC_SIZE(cmd); - ret = copy_to_user(user_arg, hid->phys, len) ? 
- -EFAULT : len; - break; - } +static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct inode *inode = file_inode(file); + unsigned int minor = iminor(inode); + struct hidraw *dev; + struct hidraw_list *list = file->private_data; + void __user *user_arg = (void __user *)arg; + int ret; - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) { - int len = strlen(hid->uniq) + 1; - if (len > _IOC_SIZE(cmd)) - len = _IOC_SIZE(cmd); - ret = copy_to_user(user_arg, hid->uniq, len) ? - -EFAULT : len; - break; - } - } + down_read(&minors_rwsem); + dev = hidraw_table[minor]; + if (!dev || !dev->exist || hidraw_is_revoked(list)) { + ret = -ENODEV; + goto out; + } + + if (_IOC_TYPE(cmd) != 'H') { + ret = -EINVAL; + goto out; + } + if (_IOC_NR(cmd) > HIDIOCTL_LAST || _IOC_NR(cmd) == 0) { ret = -ENOTTY; + goto out; } + + ret = hidraw_fixed_size_ioctl(file, dev, cmd, user_arg); + if (ret != -EAGAIN) + goto out; + + switch (_IOC_DIR(cmd)) { + case (_IOC_READ | _IOC_WRITE): + ret = hidraw_rw_variable_size_ioctl(file, dev, cmd, user_arg); + break; + case _IOC_READ: + ret = hidraw_ro_variable_size_ioctl(file, dev, cmd, user_arg); + break; + default: + /* Any other IOC_DIR is wrong */ + ret = -EINVAL; + } + out: up_read(&minors_rwsem); return ret; diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c index c25a54d5b39a..0ba9195c9d71 100644 --- a/drivers/hwmon/mlxreg-fan.c +++ b/drivers/hwmon/mlxreg-fan.c @@ -113,8 +113,8 @@ struct mlxreg_fan { int divider; }; -static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, - unsigned long state); +static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state, bool thermal); static int mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, @@ -224,8 +224,9 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, * last thermal state. */ if (pwm->last_hwmon_state >= pwm->last_thermal_state) - return mlxreg_fan_set_cur_state(pwm->cdev, - pwm->last_hwmon_state); + return _mlxreg_fan_set_cur_state(pwm->cdev, + pwm->last_hwmon_state, + false); return 0; } return regmap_write(fan->regmap, pwm->reg, val); @@ -357,9 +358,8 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev, return 0; } -static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, - unsigned long state) - +static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state, bool thermal) { struct mlxreg_fan_pwm *pwm = cdev->devdata; struct mlxreg_fan *fan = pwm->fan; @@ -369,7 +369,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, return -EINVAL; /* Save thermal state. 
*/ - pwm->last_thermal_state = state; + if (thermal) + pwm->last_thermal_state = state; state = max_t(unsigned long, state, pwm->last_hwmon_state); err = regmap_write(fan->regmap, pwm->reg, @@ -381,6 +382,13 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, return 0; } +static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) + +{ + return _mlxreg_fan_set_cur_state(cdev, state, true); +} + static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = { .get_max_state = mlxreg_fan_get_max_state, .get_cur_state = mlxreg_fan_get_cur_state, diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index 25fd02955c38..abfff42b20c9 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -521,6 +521,10 @@ static int __catu_probe(struct device *dev, struct resource *res) struct coresight_platform_data *pdata = NULL; void __iomem *base; + drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk"); + if (IS_ERR(drvdata->atclk)) + return PTR_ERR(drvdata->atclk); + catu_desc.name = coresight_alloc_device_name(&catu_devs, dev); if (!catu_desc.name) return -ENOMEM; @@ -668,18 +672,26 @@ static int catu_runtime_suspend(struct device *dev) { struct catu_drvdata *drvdata = dev_get_drvdata(dev); - if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) - clk_disable_unprepare(drvdata->pclk); + clk_disable_unprepare(drvdata->atclk); + clk_disable_unprepare(drvdata->pclk); + return 0; } static int catu_runtime_resume(struct device *dev) { struct catu_drvdata *drvdata = dev_get_drvdata(dev); + int ret; - if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) - clk_prepare_enable(drvdata->pclk); - return 0; + ret = clk_prepare_enable(drvdata->pclk); + if (ret) + return ret; + + ret = clk_prepare_enable(drvdata->atclk); + if (ret) + clk_disable_unprepare(drvdata->pclk); + + return ret; } #endif diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h index 755776cd19c5..6e6b7aac206d 100644 --- a/drivers/hwtracing/coresight/coresight-catu.h +++ b/drivers/hwtracing/coresight/coresight-catu.h @@ -62,6 +62,7 @@ struct catu_drvdata { struct clk *pclk; + struct clk *atclk; void __iomem *base; struct coresight_device *csdev; int irq; diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index b7941d8abbfe..f20d4cab8f1d 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1200,8 +1200,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) goto out_unlock; } - if (csdev->type == CORESIGHT_DEV_TYPE_SINK || - csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { + if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && + sink_ops(csdev)->alloc_buffer) { ret = etm_perf_add_symlink_sink(csdev); if (ret) { diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index be8b46f26ddc..7b9eaeb115d2 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -481,7 +481,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR); etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR); } - etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR); + if (drvdata->numextinsel) + 
etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR); for (i = 0; i < drvdata->nr_cntr; i++) { etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i)); etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i)); @@ -1362,6 +1363,7 @@ static void etm4_init_arch_data(void *info) etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5); /* NUMEXTIN, bits[8:0] number of external inputs implemented */ drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5); + drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5); /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */ drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5); /* ATBTRIG, bit[22] implementation can support ATB triggers? */ @@ -1789,7 +1791,9 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR); state->trcseqstr = etm4x_read32(csa, TRCSEQSTR); } - state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR); + + if (drvdata->numextinsel) + state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR); for (i = 0; i < drvdata->nr_cntr; i++) { state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i)); @@ -1921,7 +1925,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR); etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR); } - etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR); + if (drvdata->numextinsel) + etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR); for (i = 0; i < drvdata->nr_cntr; i++) { etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i)); @@ -2152,6 +2157,10 @@ static int etm4_probe(struct device *dev) if (WARN_ON(!drvdata)) return -ENOMEM; + drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk"); + if (IS_ERR(drvdata->atclk)) + return PTR_ERR(drvdata->atclk); + if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE) pm_save_enable = coresight_loses_context_with_cpu(dev) ? 
PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER; @@ -2400,8 +2409,8 @@ static int etm4_runtime_suspend(struct device *dev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(dev); - if (drvdata->pclk && !IS_ERR(drvdata->pclk)) - clk_disable_unprepare(drvdata->pclk); + clk_disable_unprepare(drvdata->atclk); + clk_disable_unprepare(drvdata->pclk); return 0; } @@ -2409,11 +2418,17 @@ static int etm4_runtime_suspend(struct device *dev) static int etm4_runtime_resume(struct device *dev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(dev); + int ret; - if (drvdata->pclk && !IS_ERR(drvdata->pclk)) - clk_prepare_enable(drvdata->pclk); + ret = clk_prepare_enable(drvdata->pclk); + if (ret) + return ret; - return 0; + ret = clk_prepare_enable(drvdata->atclk); + if (ret) + clk_disable_unprepare(drvdata->pclk); + + return ret; } #endif diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 9e9165f62e81..3683966bd060 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -162,6 +162,7 @@ #define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28) #define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0) +#define TRCIDR5_NUMEXTINSEL_MASK GENMASK(11, 9) #define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16) #define TRCIDR5_ATBTRIG BIT(22) #define TRCIDR5_LPOVERRIDE BIT(23) @@ -919,7 +920,8 @@ struct etmv4_save_state { /** * struct etm4_drvdata - specifics associated to an ETM component - * @pclk APB clock if present, otherwise NULL + * @pclk: APB clock if present, otherwise NULL + * @atclk: Optional clock for the core parts of the ETMv4. * @base: Memory mapped base address for this component. * @csdev: Component vitals needed by the framework. * @spinlock: Only one at a time pls. @@ -987,6 +989,7 @@ struct etmv4_save_state { */ struct etmv4_drvdata { struct clk *pclk; + struct clk *atclk; void __iomem *base; struct coresight_device *csdev; spinlock_t spinlock; @@ -998,6 +1001,7 @@ struct etmv4_drvdata { u8 nr_cntr; u8 nr_ext_inp; u8 numcidc; + u8 numextinsel; u8 numvmidc; u8 nrseqstate; u8 nr_event; diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index 475fa4bb6813..96ef2517dd43 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -480,6 +480,10 @@ static int __tmc_probe(struct device *dev, struct resource *res) struct coresight_desc desc = { 0 }; struct coresight_dev_list *dev_list = NULL; + drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk"); + if (IS_ERR(drvdata->atclk)) + return PTR_ERR(drvdata->atclk); + ret = -ENOMEM; /* Validity for the resource is already checked by the AMBA core */ @@ -700,18 +704,26 @@ static int tmc_runtime_suspend(struct device *dev) { struct tmc_drvdata *drvdata = dev_get_drvdata(dev); - if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) - clk_disable_unprepare(drvdata->pclk); + clk_disable_unprepare(drvdata->atclk); + clk_disable_unprepare(drvdata->pclk); + return 0; } static int tmc_runtime_resume(struct device *dev) { struct tmc_drvdata *drvdata = dev_get_drvdata(dev); + int ret; - if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) - clk_prepare_enable(drvdata->pclk); - return 0; + ret = clk_prepare_enable(drvdata->pclk); + if (ret) + return ret; + + ret = clk_prepare_enable(drvdata->atclk); + if (ret) + clk_disable_unprepare(drvdata->pclk); + + return ret; } #endif diff --git a/drivers/hwtracing/coresight/coresight-tmc.h 
b/drivers/hwtracing/coresight/coresight-tmc.h index 2671926be62a..2a53acbb5990 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -166,6 +166,7 @@ struct etr_buf { /** * struct tmc_drvdata - specifics associated to an TMC component + * @atclk: optional clock for the core parts of the TMC. * @pclk: APB clock if present, otherwise NULL * @base: memory mapped base address for this component. * @csdev: component vitals needed by the framework. @@ -191,6 +192,7 @@ struct etr_buf { * @perf_buf: PERF buffer for ETR. */ struct tmc_drvdata { + struct clk *atclk; struct clk *pclk; void __iomem *base; struct coresight_device *csdev; diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c index bfca103f9f84..865fd6273e5e 100644 --- a/drivers/hwtracing/coresight/coresight-tpda.c +++ b/drivers/hwtracing/coresight/coresight-tpda.c @@ -71,12 +71,15 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata, if (tpdm_has_dsb_dataset(tpdm_data)) { rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent), "qcom,dsb-element-bits", &drvdata->dsb_esize); + if (rc) + goto out; } if (tpdm_has_cmb_dataset(tpdm_data)) { rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent), "qcom,cmb-element-bits", &drvdata->cmb_esize); } +out: if (rc) dev_warn_once(&csdev->dev, "Failed to read TPDM Element size: %d\n", rc); diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 96a32b213669..d771980a278d 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -22,7 +22,8 @@ #include "coresight-self-hosted-trace.h" #include "coresight-trbe.h" -#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) +#define PERF_IDX2OFF(idx, buf) \ + ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT)) /* * A padding packet that will help the user space tools @@ -744,12 +745,12 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev, buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event)); if (!buf) - return ERR_PTR(-ENOMEM); + return NULL; pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL); if (!pglist) { kfree(buf); - return ERR_PTR(-ENOMEM); + return NULL; } for (i = 0; i < nr_pages; i++) @@ -759,7 +760,7 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev, if (!buf->trbe_base) { kfree(pglist); kfree(buf); - return ERR_PTR(-ENOMEM); + return NULL; } buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE; buf->trbe_write = buf->trbe_base; @@ -1266,7 +1267,7 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp * into the device for that purpose. 
*/ desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL); - if (IS_ERR(desc.pdata)) + if (!desc.pdata) goto cpu_clear; desc.type = CORESIGHT_DEV_TYPE_SINK; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index ef9bed2f2dcc..24c0ada72f6a 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -311,6 +311,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) exit_probe: dw_i2c_plat_pm_cleanup(dev); + i2c_dw_prepare_clk(dev, false); exit_reset: reset_control_assert(dev->rst); return ret; @@ -328,9 +329,11 @@ static void dw_i2c_plat_remove(struct platform_device *pdev) i2c_dw_disable(dev); pm_runtime_dont_use_autosuspend(device); - pm_runtime_put_sync(device); + pm_runtime_put_noidle(device); dw_i2c_plat_pm_cleanup(dev); + i2c_dw_prepare_clk(dev, false); + i2c_dw_remove_lock_support(dev); reset_control_assert(dev->rst); diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index e0ba653dec2d..8beef73960c7 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1243,6 +1243,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, { int ret; int left_num = num; + bool write_then_read_en = false; struct mtk_i2c *i2c = i2c_get_adapdata(adap); ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks); @@ -1256,6 +1257,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) && msgs[0].addr == msgs[1].addr) { i2c->auto_restart = 0; + write_then_read_en = true; } } @@ -1280,12 +1282,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, else i2c->op = I2C_MASTER_WR; - if (!i2c->auto_restart) { - if (num > 1) { - /* combined two messages into one transaction */ - i2c->op = I2C_MASTER_WRRD; - left_num--; - } + if (write_then_read_en) { + /* combined two messages into one transaction */ + i2c->op = I2C_MASTER_WRRD; + left_num--; } /* always use DMA mode. */ @@ -1293,7 +1293,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, if (ret < 0) goto err_exit; - msgs++; + if (i2c->op == I2C_MASTER_WRRD) + msgs += 2; + else + msgs++; } /* the return value is number of executed messages */ ret = num; diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 474a96ebda22..a1945bf9ef19 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -377,6 +377,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master, SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000); if (ret) { dev_err(master->dev, "Timeout when polling for COMPLETE\n"); + i3c_generic_ibi_recycle_slot(data->ibi_pool, slot); return ret; } @@ -438,9 +439,24 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) */ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); - /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */ - writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI | - SVC_I3C_MCTRL_IBIRESP_AUTO, + /* + * Write REQUEST_START_ADDR request to emit broadcast address for arbitration, + * instead of using AUTO_IBI. + * + * Using an AutoIBI request may cause the controller to remain in AutoIBI state when + * there is a glitch on SDA line (high->low->high). + * 1. SDA high->low, raising an interrupt to execute IBI isr. + * 2. SDA low->high. + * 3. IBI isr writes an AutoIBI request. + * 4. The controller will not start AutoIBI process because SDA is not low. + * 5. IBIWON polling times out. + * 6.
Controller remains in AutoIBI state and doesn't accept EmitStop request. + */ + writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | + SVC_I3C_MCTRL_TYPE_I3C | + SVC_I3C_MCTRL_IBIRESP_MANUAL | + SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) | + SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR), master->regs + SVC_I3C_MCTRL); /* Wait for IBIWON, should take approximately 100us */ @@ -460,10 +476,15 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) switch (ibitype) { case SVC_I3C_MSTATUS_IBITYPE_IBI: dev = svc_i3c_master_dev_from_addr(master, ibiaddr); - if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) + if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) { svc_i3c_master_nack_ibi(master); - else + } else { + if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) + svc_i3c_master_ack_ibi(master, true); + else + svc_i3c_master_ack_ibi(master, false); svc_i3c_master_handle_ibi(master, dev); + } break; case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN: if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN)) diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c index c3f9fa307b84..2db16c36d814 100644 --- a/drivers/iio/adc/pac1934.c +++ b/drivers/iio/adc/pac1934.c @@ -88,6 +88,7 @@ #define PAC1934_VPOWER_3_ADDR 0x19 #define PAC1934_VPOWER_4_ADDR 0x1A #define PAC1934_REFRESH_V_REG_ADDR 0x1F +#define PAC1934_SLOW_REG_ADDR 0x20 #define PAC1934_CTRL_STAT_REGS_ADDR 0x1C #define PAC1934_PID_REG_ADDR 0xFD #define PAC1934_MID_REG_ADDR 0xFE @@ -1265,8 +1266,23 @@ static int pac1934_chip_configure(struct pac1934_chip_info *info) /* no SLOW triggered REFRESH, clear POR */ regs[PAC1934_SLOW_REG_OFF] = 0; - ret = i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR, - ARRAY_SIZE(regs), (u8 *)regs); + /* + * Write the three bytes sequentially, as the device does not support + * block write.
+ */ + ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_STAT_REGS_ADDR, + regs[PAC1934_CHANNEL_DIS_REG_OFF]); + if (ret) + return ret; + + ret = i2c_smbus_write_byte_data(client, + PAC1934_CTRL_STAT_REGS_ADDR + PAC1934_NEG_PWR_REG_OFF, + regs[PAC1934_NEG_PWR_REG_OFF]); + if (ret) + return ret; + + ret = i2c_smbus_write_byte_data(client, PAC1934_SLOW_REG_ADDR, + regs[PAC1934_SLOW_REG_OFF]); if (ret) return ret; diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c index ebc583b07e0c..e12eb7137b06 100644 --- a/drivers/iio/adc/xilinx-ams.c +++ b/drivers/iio/adc/xilinx-ams.c @@ -118,7 +118,7 @@ #define AMS_ALARM_THRESHOLD_OFF_10 0x10 #define AMS_ALARM_THRESHOLD_OFF_20 0x20 -#define AMS_ALARM_THR_DIRECT_MASK BIT(1) +#define AMS_ALARM_THR_DIRECT_MASK BIT(0) #define AMS_ALARM_THR_MIN 0x0000 #define AMS_ALARM_THR_MAX (BIT(16) - 1) @@ -389,6 +389,29 @@ static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask) ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg); } +static void ams_unmask(struct ams *ams) +{ + unsigned int status, unmask; + + status = readl(ams->base + AMS_ISR_0); + + /* Clear those bits which are not active anymore */ + unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm; + + /* Clear status of disabled alarm */ + unmask |= ams->intr_mask; + + ams->current_masked_alarm &= status; + + /* Also clear those which are masked out anyway */ + ams->current_masked_alarm &= ~ams->intr_mask; + + /* Clear the interrupts before we unmask them */ + writel(unmask, ams->base + AMS_ISR_0); + + ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK); +} + static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask) { unsigned long flags; @@ -401,6 +424,7 @@ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask) spin_lock_irqsave(&ams->intr_lock, flags); ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask); + ams_unmask(ams); spin_unlock_irqrestore(&ams->intr_lock, flags); } @@ -1035,28 +1059,9 @@ static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events) static void ams_unmask_worker(struct work_struct *work) { struct ams *ams = container_of(work, struct ams, ams_unmask_work.work); - unsigned int status, unmask; spin_lock_irq(&ams->intr_lock); - - status = readl(ams->base + AMS_ISR_0); - - /* Clear those bits which are not active anymore */ - unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm; - - /* Clear status of disabled alarm */ - unmask |= ams->intr_mask; - - ams->current_masked_alarm &= status; - - /* Also clear those which are masked out anyway */ - ams->current_masked_alarm &= ~ams->intr_mask; - - /* Clear the interrupts before we unmask them */ - writel(unmask, ams->base + AMS_ISR_0); - - ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK); - + ams_unmask(ams); spin_unlock_irq(&ams->intr_lock); /* If still pending some alarm re-trigger the timer */ diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c index e0b7f658d611..cf9cf90cd6e2 100644 --- a/drivers/iio/dac/ad5360.c +++ b/drivers/iio/dac/ad5360.c @@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set, unsigned int clr) { struct ad5360_state *st = iio_priv(indio_dev); - unsigned int ret; + int ret; mutex_lock(&st->lock); diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c index 7644acfd879e..9228e3cee1b8 100644 --- a/drivers/iio/dac/ad5421.c +++ b/drivers/iio/dac/ad5421.c @@ -186,7 +186,7 @@ static int 
ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set, unsigned int clr) { struct ad5421_state *st = iio_priv(indio_dev); - unsigned int ret; + int ret; mutex_lock(&st->lock); diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c index e13e64a5164c..f58d23c049ec 100644 --- a/drivers/iio/frequency/adf4350.c +++ b/drivers/iio/frequency/adf4350.c @@ -149,6 +149,19 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq) return -EINVAL; + st->r4_rf_div_sel = 0; + + /* + * !\TODO: The below computation is making sure we get a power of 2 + * shift (st->r4_rf_div_sel) so that freq becomes higher or equal to + * ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long() + * and friends. + */ + while (freq < ADF4350_MIN_VCO_FREQ) { + freq <<= 1; + st->r4_rf_div_sel++; + } + if (freq > ADF4350_MAX_FREQ_45_PRESC) { prescaler = ADF4350_REG1_PRESCALER; mdiv = 75; @@ -157,13 +170,6 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) mdiv = 23; } - st->r4_rf_div_sel = 0; - - while (freq < ADF4350_MIN_VCO_FREQ) { - freq <<= 1; - st->r4_rf_div_sel++; - } - /* * Allow a predefined reference division factor * if not set, compute our own diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index 73aeddf53b76..790bc8fbf21d 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -847,10 +847,6 @@ static int inv_icm42600_resume(struct device *dev) if (ret) goto out_unlock; - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - /* restore sensors state */ ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro, st->suspended.accel, diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 1155487f7aea..85ba80c57d08 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -10,6 +10,7 @@ #include <linux/mutex.h> #include <linux/property.h> #include <linux/slab.h> +#include <linux/units.h> #include <linux/iio/iio.h> #include <linux/iio/iio-opaque.h> @@ -602,7 +603,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, { int scale_type, scale_val, scale_val2; int offset_type, offset_val, offset_val2; - s64 raw64 = raw; + s64 denominator, raw64 = raw; offset_type = iio_channel_read(chan, &offset_val, &offset_val2, IIO_CHAN_INFO_OFFSET); @@ -637,7 +638,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, * If no channel scaling is available apply consumer scale to * raw value and return. 
*/ - *processed = raw * scale; + *processed = raw64 * scale; return 0; } @@ -646,20 +647,19 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, *processed = raw64 * scale_val * scale; break; case IIO_VAL_INT_PLUS_MICRO: - if (scale_val2 < 0) - *processed = -raw64 * scale_val * scale; - else - *processed = raw64 * scale_val * scale; - *processed += div_s64(raw64 * (s64)scale_val2 * scale, - 1000000LL); - break; case IIO_VAL_INT_PLUS_NANO: - if (scale_val2 < 0) - *processed = -raw64 * scale_val * scale; - else - *processed = raw64 * scale_val * scale; - *processed += div_s64(raw64 * (s64)scale_val2 * scale, - 1000000000LL); + switch (scale_type) { + case IIO_VAL_INT_PLUS_MICRO: + denominator = MICRO; + break; + case IIO_VAL_INT_PLUS_NANO: + denominator = NANO; + break; + } + *processed = raw64 * scale * abs(scale_val); + *processed += div_s64(raw64 * scale * abs(scale_val2), denominator); + if (scale_val < 0 || scale_val2 < 0) + *processed *= -1; break; case IIO_VAL_FRACTIONAL: *processed = div_s64(raw64 * (s64)scale_val * scale, diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index be0743dac3ff..929e89841c12 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -454,14 +454,10 @@ static int addr_resolve_neigh(const struct dst_entry *dst, { int ret = 0; - if (ndev_flags & IFF_LOOPBACK) { + if (ndev_flags & IFF_LOOPBACK) memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); - } else { - if (!(ndev_flags & IFF_NOARP)) { - /* If the device doesn't do ARP internally */ - ret = fetch_ha(dst, addr, dst_in, seq); - } - } + else + ret = fetch_ha(dst, addr, dst_in, seq); return ret; } diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index d45e3909dafe..50bb3c43f40b 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1032,8 +1032,8 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id, struct cm_id_private *cm_id_priv; cm_id_priv = container_of(cm_id, struct cm_id_private, id); - pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__, - cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount)); + pr_err_ratelimited("%s: cm_id=%p timed out. 
state %d -> %d, refcnt=%d\n", __func__, + cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount)); } static void cm_destroy_id(struct ib_cm_id *cm_id, int err) diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 53571e6b3162..66df5bed6a56 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1013,6 +1013,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX) timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX; + spin_lock_irqsave(&ib_nl_request_lock, flags); + delta = timeout - sa_local_svc_timeout_ms; if (delta < 0) abs_delta = -delta; @@ -1020,7 +1022,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, abs_delta = delta; if (delta != 0) { - spin_lock_irqsave(&ib_nl_request_lock, flags); sa_local_svc_timeout_ms = timeout; list_for_each_entry(query, &ib_nl_request_list, list) { if (delta < 0 && abs_delta > query->timeout) @@ -1038,9 +1039,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, if (delay) mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, (unsigned long)delay); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); } + spin_unlock_irqrestore(&ib_nl_request_lock, flags); + settimeout_out: return 0; } diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 435c456a4fd5..f3e58797705d 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -13,6 +13,7 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/bitmap.h> +#include <linux/log2.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> @@ -865,6 +866,51 @@ static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev, resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask(); } +/* + * Calculate maximum SQ overhead across all QP types. + * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT) + * have smaller overhead than the types calculated below, + * so they are implicitly included. 
+ */ +static u32 mlx5_ib_calc_max_sq_overhead(void) +{ + u32 max_overhead_xrc, overhead_ud_lso, a, b; + + /* XRC_INI */ + max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg); + max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg); + a = sizeof(struct mlx5_wqe_atomic_seg) + + sizeof(struct mlx5_wqe_raddr_seg); + b = sizeof(struct mlx5_wqe_umr_ctrl_seg) + + sizeof(struct mlx5_mkey_seg) + + MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD; + max_overhead_xrc += max(a, b); + + /* UD with LSO */ + overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg); + overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad); + overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg); + overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg); + + return max(max_overhead_xrc, overhead_ud_lso); +} + +static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev) +{ + struct mlx5_core_dev *mdev = dev->mdev; + u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); + u32 max_wqe_size; + /* max QP overhead + 1 SGE, no inline, no special features */ + max_wqe_size = mlx5_ib_calc_max_sq_overhead() + + sizeof(struct mlx5_wqe_data_seg); + + max_wqe_size = roundup_pow_of_two(max_wqe_size); + + max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB); + + return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size; +} + static int mlx5_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) @@ -1023,7 +1069,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->max_mr_size = ~0ull; props->page_size_cap = ~(min_page_size - 1); props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp); - props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); + props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev); max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / sizeof(struct mlx5_wqe_data_seg); max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512); @@ -1767,7 +1813,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev, } static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master, - struct mlx5_core_dev *slave) + struct mlx5_core_dev *slave, + struct mlx5_ib_lb_state *lb_state) { int err; @@ -1779,6 +1826,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master, if (err) goto out; + lb_state->force_enable = true; return 0; out: @@ -1787,16 +1835,22 @@ out: } static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master, - struct mlx5_core_dev *slave) + struct mlx5_core_dev *slave, + struct mlx5_ib_lb_state *lb_state) { mlx5_nic_vport_update_local_lb(slave, false); mlx5_nic_vport_update_local_lb(master, false); + + lb_state->force_enable = false; } int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) { int err = 0; + if (dev->lb.force_enable) + return 0; + mutex_lock(&dev->lb.mutex); if (td) dev->lb.user_td++; @@ -1818,6 +1872,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) { + if (dev->lb.force_enable) + return; + mutex_lock(&dev->lb.mutex); if (td) dev->lb.user_td--; @@ -3475,7 +3532,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, lockdep_assert_held(&mlx5_ib_multiport_mutex); - mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev); + mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb); mlx5_core_mp_event_replay(ibdev->mdev, MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, @@ -3572,7 +3629,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, MLX5_DRIVER_EVENT_AFFILIATION_DONE, &key); - err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev); + err = 
mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb); if (err) goto unbind; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 29bde64ea1ea..f49cb588a856 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1083,6 +1083,7 @@ struct mlx5_ib_lb_state { u32 user_td; int qps; bool enabled; + bool force_enable; }; struct mlx5_ib_pf_eq { diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 80332638d9e3..be6cd8ce4d97 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task) * yield the cpu and reschedule the task */ if (!ret) { - task->state = TASK_STATE_IDLE; - resched = 1; + if (task->state != TASK_STATE_DRAINING) { + task->state = TASK_STATE_IDLE; + resched = 1; + } else { + cont = 1; + } goto exit; } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 7ca0297d68a4..d0c0cde09f11 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -773,7 +773,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, struct siw_wqe *wqe = tx_wqe(qp); unsigned long flags; - int rv = 0; + int rv = 0, imm_err = 0; if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) { siw_dbg_qp(qp, "wr must be empty for user mapped sq\n"); @@ -959,9 +959,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, * Send directly if SQ processing is not in progress. * Eventual immediate errors (rv < 0) do not affect the involved * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ - * processing, if new work is already pending. But rv must be passed - * to caller. + * processing, if new work is already pending. But rv and pointer + * to failed work request must be passed to caller. */ + if (unlikely(rv < 0)) { + /* + * Immediate error + */ + siw_dbg_qp(qp, "Immediate error %d\n", rv); + imm_err = rv; + *bad_wr = wr; + } if (wqe->wr_status != SIW_WR_IDLE) { spin_unlock_irqrestore(&qp->sq_lock, flags); goto skip_direct_sending; @@ -986,15 +994,10 @@ skip_direct_sending: up_read(&qp->state_lock); - if (rv >= 0) - return 0; - /* - * Immediate error - */ - siw_dbg_qp(qp, "error %d\n", rv); + if (unlikely(imm_err)) + return imm_err; - *bad_wr = wr; - return rv; + return (rv >= 0) ? 
0 : rv; } /* diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 2c51ea9d01d7..13336a2fd49c 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -775,6 +775,7 @@ static int uinput_ff_upload_to_user(char __user *buffer, if (in_compat_syscall()) { struct uinput_ff_upload_compat ff_up_compat; + memset(&ff_up_compat, 0, sizeof(ff_up_compat)); ff_up_compat.request_id = ff_up->request_id; ff_up_compat.retval = ff_up->retval; /* diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 3ddabc5a2c99..d7496d47eabe 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -3319,7 +3319,7 @@ static int mxt_probe(struct i2c_client *client) if (data->reset_gpio) { /* Wait a while and then de-assert the RESET GPIO line */ msleep(MXT_RESET_GPIO_TIME); - gpiod_set_value(data->reset_gpio, 0); + gpiod_set_value_cansleep(data->reset_gpio, 0); msleep(MXT_RESET_INVALID_CHG); } diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c index affbf4a1558d..5aa7f46a420b 100644 --- a/drivers/iommu/intel/debugfs.c +++ b/drivers/iommu/intel/debugfs.c @@ -435,8 +435,21 @@ static int domain_translation_struct_show(struct seq_file *m, } pgd &= VTD_PAGE_MASK; } else { /* legacy mode */ - pgd = context->lo & VTD_PAGE_MASK; - agaw = context->hi & 7; + u8 tt = (u8)(context->lo & GENMASK_ULL(3, 2)) >> 2; + + /* + * According to Translation Type(TT), + * get the page table pointer(SSPTPTR). + */ + switch (tt) { + case CONTEXT_TT_MULTI_LEVEL: + case CONTEXT_TT_DEV_IOTLB: + pgd = context->lo & VTD_PAGE_MASK; + agaw = context->hi & 7; + break; + default: + goto iommu_unlock; + } } seq_printf(m, "Device %04x:%02x:%02x.%x ", diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 32d6710e264a..667407974e23 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3952,7 +3952,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) } if (info->ats_supported && ecap_prs(iommu->ecap) && - pci_pri_supported(pdev)) + ecap_pds(iommu->ecap) && pci_pri_supported(pdev)) info->pri_supported = 1; } } diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index f521155fb793..df24a62e8ca4 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -541,7 +541,8 @@ enum { #define pasid_supported(iommu) (sm_supported(iommu) && \ ecap_pasid((iommu)->ecap)) #define ssads_supported(iommu) (sm_supported(iommu) && \ - ecap_slads((iommu)->ecap)) + ecap_slads((iommu)->ecap) && \ + ecap_smpwc(iommu->ecap)) #define nested_supported(iommu) (sm_supported(iommu) && \ ecap_nest((iommu)->ecap)) diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 36dbcf2d728a..9c4af7d58846 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -252,11 +252,11 @@ static int plic_irq_suspend(void) priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; - for (i = 0; i < priv->nr_irqs; i++) - if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)) - __set_bit(i, priv->prio_save); - else - __clear_bit(i, priv->prio_save); + /* irq ID 0 is reserved */ + for (i = 1; i < priv->nr_irqs; i++) { + __assign_bit(i, priv->prio_save, + readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)); + } for_each_cpu(cpu, cpu_present_mask) { struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); @@ -284,7 +284,8 @@ static void plic_irq_resume(void) priv = 
per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; - for (i = 0; i < priv->nr_irqs; i++) { + /* irq ID 0 is reserved */ + for (i = 1; i < priv->nr_irqs; i++) { index = BIT_WORD(i); writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0, priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID); diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c index 07a83bb2dfdf..bb00097b1ae5 100644 --- a/drivers/leds/flash/leds-qcom-flash.c +++ b/drivers/leds/flash/leds-qcom-flash.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/bitfield.h> @@ -114,36 +114,39 @@ enum { REG_THERM_THRSH1, REG_THERM_THRSH2, REG_THERM_THRSH3, + REG_TORCH_CLAMP, REG_MAX_COUNT, }; static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { - REG_FIELD(0x08, 0, 7), /* status1 */ - REG_FIELD(0x09, 0, 7), /* status2 */ - REG_FIELD(0x0a, 0, 7), /* status3 */ - REG_FIELD_ID(0x40, 0, 7, 3, 1), /* chan_timer */ - REG_FIELD_ID(0x43, 0, 6, 3, 1), /* itarget */ - REG_FIELD(0x46, 7, 7), /* module_en */ - REG_FIELD(0x47, 0, 5), /* iresolution */ - REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */ - REG_FIELD(0x4c, 0, 2), /* chan_en */ - REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */ - REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */ - REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */ + [REG_STATUS1] = REG_FIELD(0x08, 0, 7), + [REG_STATUS2] = REG_FIELD(0x09, 0, 7), + [REG_STATUS3] = REG_FIELD(0x0a, 0, 7), + [REG_CHAN_TIMER] = REG_FIELD_ID(0x40, 0, 7, 3, 1), + [REG_ITARGET] = REG_FIELD_ID(0x43, 0, 6, 3, 1), + [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7), + [REG_IRESOLUTION] = REG_FIELD(0x47, 0, 5), + [REG_CHAN_STROBE] = REG_FIELD_ID(0x49, 0, 2, 3, 1), + [REG_CHAN_EN] = REG_FIELD(0x4c, 0, 2), + [REG_THERM_THRSH1] = REG_FIELD(0x56, 0, 2), + [REG_THERM_THRSH2] = REG_FIELD(0x57, 0, 2), + [REG_THERM_THRSH3] = REG_FIELD(0x58, 0, 2), + [REG_TORCH_CLAMP] = REG_FIELD(0xec, 0, 6), }; static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { - REG_FIELD(0x06, 0, 7), /* status1 */ - REG_FIELD(0x07, 0, 6), /* status2 */ - REG_FIELD(0x09, 0, 7), /* status3 */ - REG_FIELD_ID(0x3e, 0, 7, 4, 1), /* chan_timer */ - REG_FIELD_ID(0x42, 0, 6, 4, 1), /* itarget */ - REG_FIELD(0x46, 7, 7), /* module_en */ - REG_FIELD(0x49, 0, 3), /* iresolution */ - REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */ - REG_FIELD(0x4e, 0, 3), /* chan_en */ - REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */ - REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */ + [REG_STATUS1] = REG_FIELD(0x06, 0, 7), + [REG_STATUS2] = REG_FIELD(0x07, 0, 6), + [REG_STATUS3] = REG_FIELD(0x09, 0, 7), + [REG_CHAN_TIMER] = REG_FIELD_ID(0x3e, 0, 7, 4, 1), + [REG_ITARGET] = REG_FIELD_ID(0x42, 0, 6, 4, 1), + [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7), + [REG_IRESOLUTION] = REG_FIELD(0x49, 0, 3), + [REG_CHAN_STROBE] = REG_FIELD_ID(0x4a, 0, 6, 4, 1), + [REG_CHAN_EN] = REG_FIELD(0x4e, 0, 3), + [REG_THERM_THRSH1] = REG_FIELD(0x7a, 0, 2), + [REG_THERM_THRSH2] = REG_FIELD(0x78, 0, 2), + [REG_TORCH_CLAMP] = REG_FIELD(0xed, 0, 6), }; struct qcom_flash_data { @@ -156,6 +159,7 @@ struct qcom_flash_data { u8 max_channels; u8 chan_en_bits; u8 revision; + u8 torch_clamp; }; struct qcom_flash_led { @@ -702,6 +706,7 @@ static int qcom_flash_register_led_device(struct device *dev, u32 current_ua, timeout_us; u32 channels[4]; int i, rc, count; + u8 torch_clamp; count = fwnode_property_count_u32(node, "led-sources"); if 
(count <= 0) { @@ -751,6 +756,12 @@ static int qcom_flash_register_led_device(struct device *dev, current_ua = min_t(u32, current_ua, TORCH_CURRENT_MAX_UA * led->chan_count); led->max_torch_current_ma = current_ua / UA_PER_MA; + torch_clamp = (current_ua / led->chan_count) / TORCH_IRES_UA; + if (torch_clamp != 0) + torch_clamp--; + + flash_data->torch_clamp = max_t(u8, flash_data->torch_clamp, torch_clamp); + if (fwnode_property_present(node, "flash-max-microamp")) { flash->led_cdev.flags |= LED_DEV_CAP_FLASH; @@ -918,8 +929,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev) flash_data->leds_count++; } - return 0; - + return regmap_field_write(flash_data->r_fields[REG_TORCH_CLAMP], flash_data->torch_clamp); release: fwnode_handle_put(child); while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count) diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index e71456a56ab8..fd447eb7eb15 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -212,7 +212,7 @@ int lp55xx_update_program_memory(struct lp55xx_chip *chip, * For LED chip that support page, PAGE is already set in load_engine. */ if (!cfg->pages_per_engine) - start_addr += LP55xx_BYTES_PER_PAGE * idx; + start_addr += LP55xx_BYTES_PER_PAGE * (idx - 1); for (page = 0; page < program_length / LP55xx_BYTES_PER_PAGE; page++) { /* Write to the next page each 32 bytes (if supported) */ diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index d24f71819c3d..38ab35157c85 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -379,20 +379,13 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); struct cmdq_task *task; unsigned long curr_pa, end_pa; - int ret; /* Client should not flush new tasks if suspended. 
*/ WARN_ON(cmdq->suspended); - ret = pm_runtime_get_sync(cmdq->mbox.dev); - if (ret < 0) - return ret; - task = kzalloc(sizeof(*task), GFP_ATOMIC); - if (!task) { - pm_runtime_put_autosuspend(cmdq->mbox.dev); + if (!task) return -ENOMEM; - } task->cmdq = cmdq; INIT_LIST_HEAD(&task->list_entry); @@ -439,9 +432,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) } list_move_tail(&task->list_entry, &thread->task_busy_list); - pm_runtime_mark_last_busy(cmdq->mbox.dev); - pm_runtime_put_autosuspend(cmdq->mbox.dev); - return 0; } diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index d59fcb74b347..5bf81b60e9e6 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -62,7 +62,8 @@ #define DST_BIT_POS 9U #define SRC_BITMASK GENMASK(11, 8) -#define MAX_SGI 16 +/* Macro to represent SGI type for IPI IRQs */ +#define IPI_IRQ_TYPE_SGI 2 /* * Module parameters @@ -121,6 +122,7 @@ struct zynqmp_ipi_mbox { * @dev: device pointer corresponding to the Xilinx ZynqMP * IPI agent * @irq: IPI agent interrupt ID + * @irq_type: IPI SGI or SPI IRQ type * @method: IPI SMC or HVC is going to be used * @local_id: local IPI agent ID * @virq_sgi: IRQ number mapped to SGI @@ -130,6 +132,7 @@ struct zynqmp_ipi_mbox { struct zynqmp_ipi_pdata { struct device *dev; int irq; + unsigned int irq_type; unsigned int method; u32 local_id; int virq_sgi; @@ -887,17 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata) struct zynqmp_ipi_mbox *ipi_mbox; int i; - if (pdata->irq < MAX_SGI) + if (pdata->irq_type == IPI_IRQ_TYPE_SGI) xlnx_mbox_cleanup_sgi(pdata); - i = pdata->num_mboxes; + i = pdata->num_mboxes - 1; for (; i >= 0; i--) { ipi_mbox = &pdata->ipi_mboxes[i]; - if (ipi_mbox->dev.parent) { - mbox_controller_unregister(&ipi_mbox->mbox); - if (device_is_registered(&ipi_mbox->dev)) - device_unregister(&ipi_mbox->dev); - } + if (device_is_registered(&ipi_mbox->dev)) + device_unregister(&ipi_mbox->dev); } } @@ -959,14 +959,16 @@ static int zynqmp_ipi_probe(struct platform_device *pdev) dev_err(dev, "failed to parse interrupts\n"); goto free_mbox_dev; } - ret = out_irq.args[1]; + + /* Use interrupt type to distinguish SGI and SPI interrupts */ + pdata->irq_type = out_irq.args[0]; /* * If Interrupt number is in SGI range, then request SGI else request * IPI system IRQ. 
*/ - if (ret < MAX_SGI) { - pdata->irq = ret; + if (pdata->irq_type == IPI_IRQ_TYPE_SGI) { + pdata->irq = out_irq.args[1]; ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata); if (ret) goto free_mbox_dev; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index f3a3f2ef6322..391c1df19a76 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -162,6 +162,7 @@ struct mapped_device { #define DMF_SUSPENDED_INTERNALLY 7 #define DMF_POST_SUSPENDING 8 #define DMF_EMULATE_ZONE_APPEND 9 +#define DMF_QUEUE_STOPPED 10 void disable_discard(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md); diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c index 12f954a0c532..afb062e1f1fb 100644 --- a/drivers/md/dm-vdo/indexer/volume-index.c +++ b/drivers/md/dm-vdo/indexer/volume-index.c @@ -836,7 +836,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index, "%zu bytes decoded of %zu expected", offset, sizeof(buffer)); if (result != VDO_SUCCESS) - result = UDS_CORRUPT_DATA; + return UDS_CORRUPT_DATA; if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) { return vdo_log_warning_strerror(UDS_CORRUPT_DATA, @@ -928,7 +928,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index, "%zu bytes decoded of %zu expected", offset, sizeof(buffer)); if (result != VDO_SUCCESS) - result = UDS_CORRUPT_DATA; + return UDS_CORRUPT_DATA; if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0) return vdo_log_warning_strerror(UDS_CORRUPT_DATA, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a7deeda59a55..fd84a126f63f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2918,7 +2918,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, { bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; - int r; + int r = 0; lockdep_assert_held(&md->suspend_lock); @@ -2970,8 +2970,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, * Stop md->queue before flushing md->wq in case request-based * dm defers requests to md->wq from md->queue. */ - if (dm_request_based(md)) + if (map && dm_request_based(md)) { dm_stop_queue(md->queue); + set_bit(DMF_QUEUE_STOPPED, &md->flags); + } flush_workqueue(md->wq); @@ -2980,7 +2982,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, * We call dm_wait_for_completion to wait for all existing requests * to finish. */ - r = dm_wait_for_completion(md, task_state); + if (map) + r = dm_wait_for_completion(md, task_state); if (!r) set_bit(dmf_suspended_flag, &md->flags); @@ -2993,7 +2996,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, if (r < 0) { dm_queue_flush(md); - if (dm_request_based(md)) + if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) dm_start_queue(md->queue); unlock_fs(md); @@ -3077,7 +3080,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map) * so that mapping of targets can work correctly. * Request-based dm is queueing the deferred I/Os in its request_queue. 
*/ - if (dm_request_based(md)) + if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) dm_start_queue(md->queue); unlock_fs(md); diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile index 2e8f7f60263f..08d58524419f 100644 --- a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile @@ -1,8 +1,2 @@ extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o - -all: - $(MAKE) -C $(KDIR) M=$(shell pwd) modules - -install: - $(MAKE) -C $(KDIR) M=$(shell pwd) modules_install diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index 723fe138e7bc..8bf06a763a25 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c @@ -532,8 +532,8 @@ static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111, static int mt9v111_hw_config(struct mt9v111_dev *mt9v111) { struct i2c_client *c = mt9v111->client; - unsigned int ret; u16 outfmtctrl2; + int ret; /* Force device reset. */ ret = __mt9v111_hw_reset(mt9v111); diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c index b7ca39f63dba..6dfc91216851 100644 --- a/drivers/media/i2c/rj54n1cb0c.c +++ b/drivers/media/i2c/rj54n1cb0c.c @@ -1329,10 +1329,13 @@ static int rj54n1_probe(struct i2c_client *client) V4L2_CID_GAIN, 0, 127, 1, 66); v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); - rj54n1->subdev.ctrl_handler = &rj54n1->hdl; - if (rj54n1->hdl.error) - return rj54n1->hdl.error; + if (rj54n1->hdl.error) { + ret = rj54n1->hdl.error; + goto err_free_ctrl; + } + + rj54n1->subdev.ctrl_handler = &rj54n1->hdl; rj54n1->clk_div = clk_div; rj54n1->rect.left = RJ54N1_COLUMN_SKIP; rj54n1->rect.top = RJ54N1_ROW_SKIP; diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c index 56444edaf136..6daa7aa99442 100644 --- a/drivers/media/mc/mc-devnode.c +++ b/drivers/media/mc/mc-devnode.c @@ -50,11 +50,6 @@ static void media_devnode_release(struct device *cd) { struct media_devnode *devnode = to_media_devnode(cd); - mutex_lock(&media_devnode_lock); - /* Mark device node number as free */ - clear_bit(devnode->minor, media_devnode_nums); - mutex_unlock(&media_devnode_lock); - /* Release media_devnode and perform other cleanups as needed. */ if (devnode->release) devnode->release(devnode); @@ -281,6 +276,7 @@ void media_devnode_unregister(struct media_devnode *devnode) /* Delete the cdev on this minor as well */ cdev_device_del(&devnode->cdev, &devnode->dev); devnode->media_dev = NULL; + clear_bit(devnode->minor, media_devnode_nums); mutex_unlock(&media_devnode_lock); put_device(&devnode->dev); diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index 96dd0f6ccd0d..b3761839a521 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -691,7 +691,7 @@ done: * (already discovered through iterating over links) and pads * not internally connected. 
*/ - if (origin == local || !local->num_links || + if (origin == local || local->num_links || !media_entity_has_pad_interdep(origin->entity, origin->index, local->index)) continue; diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c index 013694bfcb1c..7cbb2d586932 100644 --- a/drivers/media/pci/cx18/cx18-queue.c +++ b/drivers/media/pci/cx18/cx18-queue.c @@ -379,15 +379,22 @@ int cx18_stream_alloc(struct cx18_stream *s) break; } + buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev, + buf->buf, s->buf_size, + s->dma); + if (dma_mapping_error(&s->cx->pci_dev->dev, buf->dma_handle)) { + kfree(buf->buf); + kfree(mdl); + kfree(buf); + break; + } + INIT_LIST_HEAD(&mdl->list); INIT_LIST_HEAD(&mdl->buf_list); mdl->id = s->mdl_base_idx; /* a somewhat safe value */ cx18_enqueue(s, mdl, &s->q_idle); INIT_LIST_HEAD(&buf->list); - buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev, - buf->buf, s->buf_size, - s->dma); cx18_buf_sync_for_cpu(s, buf); list_add_tail(&buf->list, &s->buf_pool); } diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c index b7aaa8b4a784..e39bf64c5c71 100644 --- a/drivers/media/pci/ivtv/ivtv-irq.c +++ b/drivers/media/pci/ivtv/ivtv-irq.c @@ -351,7 +351,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock) /* Insert buffer block for YUV if needed */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) { - if (yi->blanking_dmaptr) { + if (yi->blanking_ptr) { s->sg_pending[idx].src = yi->blanking_dmaptr; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = 720 * 16; diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index 2d9274537725..71f040106647 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -125,7 +125,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size); /* If we've offset the y plane, ensure top area is blanked */ - if (f->offset_y && yi->blanking_dmaptr) { + if (f->offset_y && yi->blanking_ptr) { dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16); dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr); dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]); @@ -929,6 +929,12 @@ static void ivtv_yuv_init(struct ivtv *itv) yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev, yi->blanking_ptr, 720 * 16, DMA_TO_DEVICE); + if (dma_mapping_error(&itv->pdev->dev, yi->blanking_dmaptr)) { + kfree(yi->blanking_ptr); + yi->blanking_ptr = NULL; + yi->blanking_dmaptr = 0; + IVTV_DEBUG_WARN("Failed to dma_map yuv blanking buffer\n"); + } } else { yi->blanking_dmaptr = 0; IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n"); diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c index 923650d53d4c..d7dddc5c8728 100644 --- a/drivers/media/pci/mgb4/mgb4_trigger.c +++ b/drivers/media/pci/mgb4/mgb4_trigger.c @@ -91,7 +91,7 @@ static irqreturn_t trigger_handler(int irq, void *p) struct { u32 data; s64 ts __aligned(8); - } scan; + } scan = { }; scan.data = mgb4_read_reg(&st->mgbdev->video, 0xA0); mgb4_write_reg(&st->mgbdev->video, 0xA0, scan.data); diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h index 1cd990468d3d..d05e222b3921 100644 --- a/drivers/media/pci/zoran/zoran.h +++ b/drivers/media/pci/zoran/zoran.h @@ -154,12 +154,6 @@ struct zoran_jpg_settings { struct zoran; -/* zoran_fh contains 
per-open() settings */ -struct zoran_fh { - struct v4l2_fh fh; - struct zoran *zr; -}; - struct card_info { enum card_type type; char name[32]; diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c index 5c05e64c71a9..80377992a607 100644 --- a/drivers/media/pci/zoran/zoran_driver.c +++ b/drivers/media/pci/zoran/zoran_driver.c @@ -511,12 +511,11 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran *zr = video_drvdata(file); - struct zoran_fh *fh = __fh; int i; int res = 0; if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) - return zoran_s_fmt_vid_out(file, fh, fmt); + return zoran_s_fmt_vid_out(file, __fh, fmt); for (i = 0; i < NUM_FORMATS; i++) if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc) diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index 66a18830e66d..4e2636b05366 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -30,7 +30,7 @@ static void venus_reset_cpu(struct venus_core *core) u32 fw_size = core->fw.mapped_mem_size; void __iomem *wrapper_base; - if (IS_IRIS2_1(core)) + if (IS_IRIS2(core) || IS_IRIS2_1(core)) wrapper_base = core->wrapper_tz_base; else wrapper_base = core->wrapper_base; @@ -42,7 +42,7 @@ static void venus_reset_cpu(struct venus_core *core) writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR); writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR); - if (IS_IRIS2_1(core)) { + if (IS_IRIS2(core) || IS_IRIS2_1(core)) { /* Bring XTSS out of reset */ writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET); } else { @@ -68,7 +68,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume) if (resume) { venus_reset_cpu(core); } else { - if (IS_IRIS2_1(core)) + if (IS_IRIS2(core) || IS_IRIS2_1(core)) writel(WRAPPER_XTSS_SW_RESET_BIT, core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET); else @@ -181,7 +181,7 @@ static int venus_shutdown_no_tz(struct venus_core *core) void __iomem *wrapper_base = core->wrapper_base; void __iomem *wrapper_tz_base = core->wrapper_tz_base; - if (IS_IRIS2_1(core)) { + if (IS_IRIS2(core) || IS_IRIS2_1(core)) { /* Assert the reset to XTSS */ reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET); reg |= WRAPPER_XTSS_SW_RESET_BIT; diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c index 47bc3014b5d8..f7c682fca645 100644 --- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c +++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c @@ -14,8 +14,7 @@ #include "s5p_mfc_opr.h" #include "s5p_mfc_cmd_v6.h" -static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd, - const struct s5p_mfc_cmd_args *args) +static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd) { mfc_debug(2, "Issue the command: %d\n", cmd); @@ -31,7 +30,6 @@ static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd, static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev) { - struct s5p_mfc_cmd_args h2r_args; const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv; int ret; @@ -41,33 +39,23 @@ static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev) mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6); mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6); - return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6, - &h2r_args); + return s5p_mfc_cmd_host2risc_v6(dev, 
S5P_FIMV_H2R_CMD_SYS_INIT_V6); } static int s5p_mfc_sleep_cmd_v6(struct s5p_mfc_dev *dev) { - struct s5p_mfc_cmd_args h2r_args; - - memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args)); - return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6, - &h2r_args); + return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6); } static int s5p_mfc_wakeup_cmd_v6(struct s5p_mfc_dev *dev) { - struct s5p_mfc_cmd_args h2r_args; - - memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args)); - return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6, - &h2r_args); + return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6); } /* Open a new instance and get its number */ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - struct s5p_mfc_cmd_args h2r_args; int codec_type; mfc_debug(2, "Requested codec mode: %d\n", ctx->codec_mode); @@ -129,23 +117,20 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx) mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6); mfc_write(dev, 0, S5P_FIMV_D_CRC_CTRL_V6); /* no crc */ - return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6, - &h2r_args); + return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6); } /* Close instance */ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - struct s5p_mfc_cmd_args h2r_args; int ret = 0; dev->curr_ctx = ctx->num; if (ctx->state != MFCINST_FREE) { mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); ret = s5p_mfc_cmd_host2risc_v6(dev, - S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6, - &h2r_args); + S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6); } else { ret = -EINVAL; } @@ -153,9 +138,15 @@ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx) return ret; } +static int s5p_mfc_cmd_host2risc_v6_args(struct s5p_mfc_dev *dev, int cmd, + const struct s5p_mfc_cmd_args *ignored) +{ + return s5p_mfc_cmd_host2risc_v6(dev, cmd); +} + /* Initialize cmd function pointers for MFC v6 */ static const struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = { - .cmd_host2risc = s5p_mfc_cmd_host2risc_v6, + .cmd_host2risc = s5p_mfc_cmd_host2risc_v6_args, .sys_init_cmd = s5p_mfc_sys_init_cmd_v6, .sleep_cmd = s5p_mfc_sleep_cmd_v6, .wakeup_cmd = s5p_mfc_wakeup_cmd_v6, diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c index 0533d4a083d2..a078f1107300 100644 --- a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c +++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c @@ -239,7 +239,7 @@ static int delta_mjpeg_ipc_open(struct delta_ctx *pctx) return 0; } -static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au) +static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, dma_addr_t pstart, dma_addr_t pend) { struct delta_dev *delta = pctx->dev; struct delta_mjpeg_ctx *ctx = to_ctx(pctx); @@ -256,8 +256,8 @@ static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au) memset(params, 0, sizeof(*params)); - params->picture_start_addr_p = (u32)(au->paddr); - params->picture_end_addr_p = (u32)(au->paddr + au->size - 1); + params->picture_start_addr_p = pstart; + params->picture_end_addr_p = pend; /* * !WARNING! 
@@ -374,12 +374,14 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau) struct delta_dev *delta = pctx->dev; struct delta_mjpeg_ctx *ctx = to_ctx(pctx); int ret; - struct delta_au au = *pau; + void *au_vaddr = pau->vaddr; + dma_addr_t au_dma = pau->paddr; + size_t au_size = pau->size; unsigned int data_offset = 0; struct mjpeg_header *header = &ctx->header_struct; if (!ctx->header) { - ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size, + ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size, header, &data_offset); if (ret) { pctx->stream_errors++; @@ -405,17 +407,17 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau) goto err; } - ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size, + ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size, ctx->header, &data_offset); if (ret) { pctx->stream_errors++; goto err; } - au.paddr += data_offset; - au.vaddr += data_offset; + au_dma += data_offset; + au_vaddr += data_offset; - ret = delta_mjpeg_ipc_decode(pctx, &au); + ret = delta_mjpeg_ipc_decode(pctx, au_dma, au_dma + au_size - 1); if (ret) goto err; diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c index 3853245fcf6e..f8cb93ce9459 100644 --- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c +++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c @@ -52,6 +52,8 @@ #define DRAIN_TIMEOUT_MS 50 #define DRAIN_BUFFER_SIZE SZ_32K +#define CSI2RX_BRIDGE_SOURCE_PAD 1 + struct ti_csi2rx_fmt { u32 fourcc; /* Four character code. */ u32 code; /* Mbus code. */ @@ -426,8 +428,9 @@ static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier) if (ret) return ret; - ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad, - MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + ret = media_create_pad_link(&csi->source->entity, CSI2RX_BRIDGE_SOURCE_PAD, + &vdev->entity, csi->pad.index, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) { video_unregister_device(vdev); @@ -1121,7 +1124,7 @@ static int ti_csi2rx_probe(struct platform_device *pdev) if (ret) goto err_vb2q; - ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev); + ret = devm_of_platform_populate(csi->dev); if (ret) { dev_err(csi->dev, "Failed to create children: %d\n", ret); goto err_subdev; diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index f042f3f14afa..314f64420f62 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -736,11 +736,11 @@ int lirc_register(struct rc_dev *dev) cdev_init(&dev->lirc_cdev, &lirc_fops); + get_device(&dev->dev); + err = cdev_device_add(&dev->lirc_cdev, &dev->lirc_dev); if (err) - goto out_ida; - - get_device(&dev->dev); + goto out_put_device; switch (dev->driver_type) { case RC_DRIVER_SCANCODE: @@ -764,7 +764,8 @@ int lirc_register(struct rc_dev *dev) return 0; -out_ida: +out_put_device: + put_device(&dev->lirc_dev); ida_free(&lirc_ida, minor); return err; } diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c index 356a988dd6a1..2d15fdd5d999 100644 --- a/drivers/media/test-drivers/vivid/vivid-cec.c +++ b/drivers/media/test-drivers/vivid/vivid-cec.c @@ -327,7 +327,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) char osd[14]; if (!cec_is_sink(adap)) - return -ENOMSG; + break; cec_ops_set_osd_string(msg, &disp_ctl, osd); switch (disp_ctl) { case CEC_OP_DISP_CTL_DEFAULT: @@ -348,7 +348,7 @@ static int 
vivid_received(struct cec_adapter *adap, struct cec_msg *msg) cec_transmit_msg(adap, &reply, false); break; } - break; + return 0; } case CEC_MSG_VENDOR_COMMAND_WITH_ID: { u32 vendor_id; @@ -379,7 +379,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) if (size == 1) { // Ignore even op values if (!(vendor_cmd[0] & 1)) - break; + return 0; reply.len = msg->len; memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1); reply.msg[msg->len - 1]++; @@ -388,12 +388,10 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) CEC_OP_ABORT_INVALID_OP); } cec_transmit_msg(adap, &reply, false); - break; + return 0; } - default: - return -ENOMSG; } - return 0; + return -ENOMSG; } static const struct cec_adap_ops vivid_cec_adap_ops = { diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c index e73dd330af47..d913fb901973 100644 --- a/drivers/memory/samsung/exynos-srom.c +++ b/drivers/memory/samsung/exynos-srom.c @@ -121,20 +121,18 @@ static int exynos_srom_probe(struct platform_device *pdev) return -ENOMEM; srom->dev = dev; - srom->reg_base = of_iomap(np, 0); - if (!srom->reg_base) { + srom->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(srom->reg_base)) { dev_err(&pdev->dev, "iomap of exynos srom controller failed\n"); - return -ENOMEM; + return PTR_ERR(srom->reg_base); } platform_set_drvdata(pdev, srom); srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets, ARRAY_SIZE(exynos_srom_offsets)); - if (!srom->reg_offset) { - iounmap(srom->reg_base); + if (!srom->reg_offset) return -ENOMEM; - } for_each_child_of_node(np, child) { if (exynos_srom_configure_bank(srom, child)) { diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c index 992855bfda3e..6daf33e07ea0 100644 --- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c +++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c @@ -81,8 +81,9 @@ static struct mfd_cell chtdc_ti_dev[] = { static const struct regmap_config chtdc_ti_regmap_config = { .reg_bits = 8, .val_bits = 8, - .max_register = 128, - .cache_type = REGCACHE_NONE, + .max_register = 0xff, + /* The hardware does not support reading multiple registers at once */ + .use_single_read = true, }; static const struct regmap_irq chtdc_ti_irqs[] = { diff --git a/drivers/mfd/rz-mtu3.c b/drivers/mfd/rz-mtu3.c index f3dac4a29a83..9cdfef610398 100644 --- a/drivers/mfd/rz-mtu3.c +++ b/drivers/mfd/rz-mtu3.c @@ -32,7 +32,7 @@ static const unsigned long rz_mtu3_8bit_ch_reg_offs[][13] = { [RZ_MTU3_CHAN_2] = MTU_8BIT_CH_1_2(0x204, 0x092, 0x205, 0x200, 0x20c, 0x201, 0x202), [RZ_MTU3_CHAN_3] = MTU_8BIT_CH_3_4_6_7(0x008, 0x093, 0x02c, 0x000, 0x04c, 0x002, 0x004, 0x005, 0x038), [RZ_MTU3_CHAN_4] = MTU_8BIT_CH_3_4_6_7(0x009, 0x094, 0x02d, 0x001, 0x04d, 0x003, 0x006, 0x007, 0x039), - [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x1eb, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6), + [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x895, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6), [RZ_MTU3_CHAN_6] = MTU_8BIT_CH_3_4_6_7(0x808, 0x893, 0x82c, 0x800, 0x84c, 0x802, 0x804, 0x805, 0x838), [RZ_MTU3_CHAN_7] = MTU_8BIT_CH_3_4_6_7(0x809, 0x894, 0x82d, 0x801, 0x84d, 0x803, 0x806, 0x807, 0x839), [RZ_MTU3_CHAN_8] = MTU_8BIT_CH_8(0x404, 0x098, 0x400, 0x406, 0x401, 0x402, 0x403) diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index d34d58ce46db..5f26ef733fd0 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ 
-90,6 +90,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) struct resource *mem; void __iomem *base; struct gpio_chip *mmc_gpio_chip; + int ret; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) @@ -110,7 +111,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI, NULL, NULL, NULL, NULL, 0); mmc_gpio_chip->ngpio = 2; - devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL); + + ret = devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL); + if (ret) + return ret; return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, vexpress_sysreg_cells, diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index e567a36275af..9d8e51351ff8 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -323,11 +323,11 @@ static void fastrpc_free_map(struct kref *ref) perm.vmid = QCOM_SCM_VMID_HLOS; perm.perm = QCOM_SCM_PERM_RWX; - err = qcom_scm_assign_mem(map->phys, map->size, + err = qcom_scm_assign_mem(map->phys, map->len, &src_perms, &perm, 1); if (err) { dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", - map->phys, map->size, err); + map->phys, map->len, err); return; } } @@ -363,26 +363,21 @@ static int fastrpc_map_get(struct fastrpc_map *map) static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, - struct fastrpc_map **ppmap, bool take_ref) + struct fastrpc_map **ppmap) { - struct fastrpc_session_ctx *sess = fl->sctx; struct fastrpc_map *map = NULL; + struct dma_buf *buf; int ret = -ENOENT; + buf = dma_buf_get(fd); + if (IS_ERR(buf)) + return PTR_ERR(buf); + spin_lock(&fl->lock); list_for_each_entry(map, &fl->maps, node) { - if (map->fd != fd) + if (map->fd != fd || map->buf != buf) continue; - if (take_ref) { - ret = fastrpc_map_get(map); - if (ret) { - dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n", - __func__, fd, ret); - break; - } - } - *ppmap = map; ret = 0; break; @@ -752,16 +747,14 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = { .release = fastrpc_release, }; -static int fastrpc_map_create(struct fastrpc_user *fl, int fd, +static int fastrpc_map_attach(struct fastrpc_user *fl, int fd, u64 len, u32 attr, struct fastrpc_map **ppmap) { struct fastrpc_session_ctx *sess = fl->sctx; struct fastrpc_map *map = NULL; struct sg_table *table; - int err = 0; - - if (!fastrpc_map_lookup(fl, fd, ppmap, true)) - return 0; + struct scatterlist *sgl = NULL; + int err = 0, sgl_index = 0; map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) @@ -798,7 +791,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, map->phys = sg_dma_address(map->table->sgl); map->phys += ((u64)fl->sctx->sid << 32); } - map->size = len; + for_each_sg(map->table->sgl, sgl, map->table->nents, + sgl_index) + map->size += sg_dma_len(sgl); + if (len > map->size) { + dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n", + len, map->size); + err = -EINVAL; + goto map_err; + } map->va = sg_virt(map->table->sgl); map->len = len; @@ -815,10 +816,10 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; dst_perms[1].perm = QCOM_SCM_PERM_RWX; map->attr = attr; - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2); + err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2); if (err) { dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", - map->phys, map->size, err); + map->phys, map->len, err); 
goto map_err; } } @@ -839,6 +840,24 @@ get_err: return err; } +static int fastrpc_map_create(struct fastrpc_user *fl, int fd, + u64 len, u32 attr, struct fastrpc_map **ppmap) +{ + struct fastrpc_session_ctx *sess = fl->sctx; + int err = 0; + + if (!fastrpc_map_lookup(fl, fd, ppmap)) { + if (!fastrpc_map_get(*ppmap)) + return 0; + dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n", + __func__, fd); + } + + err = fastrpc_map_attach(fl, fd, len, attr, ppmap); + + return err; +} + /* * Fastrpc payload buffer with metadata looks like: * @@ -911,8 +930,12 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx) ctx->args[i].length == 0) continue; - err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, - ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); + if (i < ctx->nbufs) + err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); + else + err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); if (err) { dev_err(dev, "Error Creating map %d\n", err); return -EINVAL; @@ -1071,6 +1094,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, struct fastrpc_phy_page *pages; u64 *fdlist; int i, inbufs, outbufs, handles; + int ret = 0; inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); @@ -1086,23 +1110,26 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, u64 len = rpra[i].buf.len; if (!kernel) { - if (copy_to_user((void __user *)dst, src, len)) - return -EFAULT; + if (copy_to_user((void __user *)dst, src, len)) { + ret = -EFAULT; + goto cleanup_fdlist; + } } else { memcpy(dst, src, len); } } } +cleanup_fdlist: /* Clean up fdlist which is updated by DSP */ for (i = 0; i < FASTRPC_MAX_FDLIST; i++) { if (!fdlist[i]) break; - if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false)) + if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap)) fastrpc_map_put(mmap); } - return 0; + return ret; } static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, @@ -2044,7 +2071,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) args[0].length = sizeof(req_msg); pages.addr = map->phys; - pages.size = map->size; + pages.size = map->len; args[1].ptr = (u64) (uintptr_t) &pages; args[1].length = sizeof(pages); @@ -2059,7 +2086,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); if (err) { dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n", - req.fd, req.vaddrin, map->size); + req.fd, req.vaddrin, map->len); goto err_invoke; } @@ -2072,7 +2099,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) if (copy_to_user((void __user *)argp, &req, sizeof(req))) { /* unmap the memory and release the buffer */ req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr; - req_unmap.length = map->size; + req_unmap.length = map->len; fastrpc_req_mem_unmap_impl(fl, &req_unmap); return -EFAULT; } diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c index 500b1feaf1f6..fd7d5cd50d39 100644 --- a/drivers/misc/genwqe/card_ddcb.c +++ b/drivers/misc/genwqe/card_ddcb.c @@ -923,7 +923,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, } if (cmd->asv_length > DDCB_ASV_LENGTH) { dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n", - __func__, cmd->asiv_length); + __func__, cmd->asv_length); return -EINVAL; } rc = __genwqe_enqueue_ddcb(cd, req, f_flags); diff --git 
a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1d08009f2bd8..08b8276e1da9 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2876,15 +2876,15 @@ static int mmc_route_rpmb_frames(struct device *dev, u8 *req, return -ENOMEM; if (write) { - struct rpmb_frame *frm = (struct rpmb_frame *)resp; + struct rpmb_frame *resp_frm = (struct rpmb_frame *)resp; /* Send write request frame(s) */ set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1 | MMC_CMD23_ARG_REL_WR, req, req_len); /* Send result request frame */ - memset(frm, 0, sizeof(*frm)); - frm->req_resp = cpu_to_be16(RPMB_RESULT_READ); + memset(resp_frm, 0, sizeof(*resp_frm)); + resp_frm->req_resp = cpu_to_be16(RPMB_RESULT_READ); set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp, resp_len); diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 4b19b8a16b09..2e37700ae36e 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -945,7 +945,11 @@ static void mmc_sdio_remove(struct mmc_host *host) */ static int mmc_sdio_alive(struct mmc_host *host) { - return mmc_select_card(host->card); + if (!mmc_host_is_spi(host)) + return mmc_select_card(host->card); + else + return mmc_io_rw_direct(host->card, 0, 0, SDIO_CCCR_CCCR, 0, + NULL); } /* diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 47443fb5eb33..f42d5f9c48c1 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -563,7 +563,7 @@ mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write) * the next token (next data block, or STOP_TRAN). We can try to * minimize I/O ops by using a single read to collect end-of-busy. */ - if (multiple || write) { + if (write) { t = &host->early_status; memset(t, 0, sizeof(*t)); t->len = write ? sizeof(scratch->status) : 1; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 5b02119d8ba2..543a0be9dc64 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1858,7 +1858,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc) static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) { - struct device_node *np, *nand_np; + struct device_node *np; struct device *dev = nc->dev; int ret, reg_cells; u32 val; @@ -1885,7 +1885,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) reg_cells += val; - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { struct atmel_nand *nand; nand = atmel_nand_create(nc, nand_np, reg_cells); diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index fe5912d31bee..b0a70badf3eb 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -876,10 +876,14 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev, if (!of_property_read_u32(np, "bank-width", &val)) { if (val == 2) { nand->options |= NAND_BUSWIDTH_16; - } else if (val != 1) { + } else if (val == 1) { + nand->options |= NAND_BUSWIDTH_AUTO; + } else { dev_err(&pdev->dev, "invalid bank-width %u\n", val); return -EINVAL; } + } else { + nand->options |= NAND_BUSWIDTH_AUTO; } if (of_property_read_bool(np, "nand-skip-bbtscan")) diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 60fb35ec4b15..0b2e257b591f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ 
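The atmel nand-controller hunk above switches to for_each_child_of_node_scoped(), which releases the child node reference automatically on every exit path instead of relying on manual of_node_put() calls. A hedged sketch of the scoped form, with probe_one_child() as an assumed per-child helper:

#include <linux/of.h>

/* probe_one_child() is a hypothetical per-child setup helper. */
static int probe_one_child(struct device_node *child);

/* Sketch: the scoped iterator puts the child reference on every exit
 * path, so early returns no longer leak a node reference.
 */
static int sketch_add_children(struct device_node *np)
{
	for_each_child_of_node_scoped(np, child) {
		int ret = probe_one_child(child);

		if (ret)
			return ret;	/* no of_node_put() needed */
	}

	return 0;
}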
-869,7 +869,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev) static u32 ena_get_rxfh_key_size(struct net_device *netdev) { - return ENA_HASH_KEY_SIZE; + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_rss *rss = &adapter->ena_dev->rss; + + return rss->hash_key ? ENA_HASH_KEY_SIZE : 0; } static int ena_indirection_table_set(struct ena_adapter *adapter, diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 2c1b551e1442..92856cf387c7 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -953,15 +953,18 @@ receive_packet (struct net_device *dev) } else { struct sk_buff *skb; + skb = NULL; /* Small skbuffs for short packets */ - if (pkt_len > copy_thresh) { + if (pkt_len <= copy_thresh) + skb = netdev_alloc_skb_ip_align(dev, pkt_len); + if (!skb) { dma_unmap_single(&np->pdev->dev, desc_to_dma(desc), np->rx_buf_sz, DMA_FROM_DEVICE); skb_put (skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; - } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { + } else { dma_sync_single_for_cpu(&np->pdev->dev, desc_to_dma(desc), np->rx_buf_sz, diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 026f7270a54d..fb5d12a87872 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -479,10 +479,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) "missing 'reg' property in node %pOF\n", tbi); err = -EBUSY; + of_node_put(tbi); goto error; } set_tbipa(*prop, pdev, data->get_tbipa, priv->map, &res); + of_node_put(tbi); } } diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index 10285995c9ed..cc4798026182 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -98,19 +98,21 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) index = ice_adapter_xa_index(pdev); scoped_guard(mutex, &ice_adapters_mutex) { - err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); - if (err == -EBUSY) { - adapter = xa_load(&ice_adapters, index); + adapter = xa_load(&ice_adapters, index); + if (adapter) { refcount_inc(&adapter->refcount); WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev)); return adapter; } + err = xa_reserve(&ice_adapters, index, GFP_KERNEL); if (err) return ERR_PTR(err); adapter = ice_adapter_new(pdev); - if (!adapter) + if (!adapter) { + xa_release(&ice_adapters, index); return ERR_PTR(-ENOMEM); + } xa_store(&ice_adapters, index, adapter, GFP_KERNEL); } return adapter; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 4086a6ef352e..087a3077d548 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3216,18 +3216,14 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) /* get the Rx desc from Rx queue based on 'next_to_clean' */ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb; - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc - */ - dma_rmb(); - /* if the descriptor isn't done, no work yet to do */ gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M); - if (idpf_queue_has(GEN_CHK, rxq) != gen_id) break; + dma_rmb(); + rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M, rx_desc->rxdid_ucast); if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) { 
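The ice_adapter hunk above replaces the insert-then-handle-EBUSY flow with a plain get-or-create: look the entry up first and take a reference, otherwise reserve the index, allocate, and publish. A sketch of that pattern under an assumed object type with its own refcount (all names here are illustrative):

#include <linux/xarray.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/err.h>

struct sketch_obj {
	refcount_t refcount;
};

static DEFINE_XARRAY(sketch_objs);
static DEFINE_MUTEX(sketch_objs_lock);

static struct sketch_obj *sketch_obj_new(void)
{
	struct sketch_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		refcount_set(&o->refcount, 1);
	return o;
}

/* Sketch: look up first and take a reference; otherwise reserve the
 * index, allocate, and publish, releasing the reservation on failure.
 */
static struct sketch_obj *sketch_obj_get(unsigned long index)
{
	struct sketch_obj *o;
	int err;

	guard(mutex)(&sketch_objs_lock);

	o = xa_load(&sketch_objs, index);
	if (o) {
		refcount_inc(&o->refcount);
		return o;
	}

	err = xa_reserve(&sketch_objs, index, GFP_KERNEL);
	if (err)
		return ERR_PTR(err);

	o = sketch_obj_new();
	if (!o) {
		xa_release(&sketch_objs, index);
		return ERR_PTR(-ENOMEM);
	}

	xa_store(&sketch_objs, index, o, GFP_KERNEL);
	return o;
}

Holding the mutex across lookup and publish keeps the reserve/store pair atomic with respect to concurrent callers, which is the same reason the ice code keeps the scoped mutex guard.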
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index f27a8cf3816d..d1f374da0098 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -727,9 +727,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter) /* If post failed clear the only buffer we supplied */ if (post_err) { if (dma_mem) - dmam_free_coherent(&adapter->pdev->dev, - dma_mem->size, dma_mem->va, - dma_mem->pa); + dma_free_coherent(&adapter->pdev->dev, + dma_mem->size, dma_mem->va, + dma_mem->pa); break; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 281b34af0bb4..3160a1a2d2ab 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1180,9 +1180,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, mlx4_unregister_mac(mdev->dev, priv->port, mac); hlist_del_rcu(&entry->hlist); - kfree_rcu(entry, rcu); en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", entry->mac, priv->port); + kfree_rcu(entry, rcu); ++removed; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 5bb4940da59d..b51c00627759 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -289,6 +289,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent) return; } cond_resched(); + if (mlx5_cmd_is_down(dev)) { + ent->ret = -ENXIO; + return; + } } while (time_before(jiffies, poll_end)); ent->ret = -ETIMEDOUT; @@ -1066,7 +1070,7 @@ static void cmd_work_handler(struct work_struct *work) poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); - mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT)); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index 66d276a1be83..f4a19ffbb641 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -66,23 +66,11 @@ struct mlx5e_port_buffer { struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER]; }; -#ifdef CONFIG_MLX5_CORE_EN_DCB int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 change, unsigned int mtu, struct ieee_pfc *pfc, u32 *buffer_size, u8 *prio2buffer); -#else -static inline int -mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - u32 change, unsigned int mtu, - void *pfc, - u32 *buffer_size, - u8 *prio2buffer) -{ - return 0; -} -#endif int mlx5e_port_query_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index de2327ffb0f7..4a2f58a9d706 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -47,7 +47,6 @@ #include "en.h" #include "en/dim.h" #include "en/txrx.h" -#include "en/port_buffer.h" #include "en_tc.h" #include "en_rep.h" #include "en_accel/ipsec.h" @@ -2918,11 +2917,9 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) struct mlx5e_params *params = &priv->channels.params; struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - u16 mtu, prev_mtu; + u16 mtu; int err; - mlx5e_query_mtu(mdev, params, &prev_mtu); - err 
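The mlx4 hunk above is an ordering fix: the entry was still being read for the debug message after kfree_rcu() had queued it for freeing. A short sketch of the rule, under an assumed entry type with an rcu_head:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct sketch_mac_entry {
	struct hlist_node hlist;
	unsigned char mac[6];
	struct rcu_head rcu;
};

/* Sketch: read every field you still need before kfree_rcu(); once the
 * object is queued for RCU freeing the pointer must be treated as dead,
 * even though the actual kfree happens after a grace period.
 */
static void sketch_remove_entry(struct sketch_mac_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	pr_debug("removing MAC %pM\n", entry->mac);	/* log first */
	kfree_rcu(entry, rcu);				/* then free */
}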
= mlx5e_set_mtu(mdev, params, params->sw_mtu); if (err) return err; @@ -2932,18 +2929,6 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", __func__, mtu, params->sw_mtu); - if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) { - err = mlx5e_port_manual_buffer_config(priv, 0, mtu, - NULL, NULL, NULL); - if (err) { - netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n", - __func__, mtu, err, prev_mtu); - - mlx5e_set_mtu(mdev, params, prev_mtu); - return err; - } - } - params->sw_mtu = mtu; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 516df7f1997e..35d2fe08c0fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -27,6 +27,7 @@ struct mlx5_fw_reset { struct work_struct reset_reload_work; struct work_struct reset_now_work; struct work_struct reset_abort_work; + struct delayed_work reset_timeout_work; unsigned long reset_flags; u8 reset_method; struct timer_list timer; @@ -258,6 +259,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool return -EALREADY; } + if (current_work() != &fw_reset->reset_timeout_work.work) + cancel_delayed_work(&fw_reset->reset_timeout_work); mlx5_stop_sync_reset_poll(dev); if (poll_health) mlx5_start_health_poll(dev); @@ -328,6 +331,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) } mlx5_stop_health_poll(dev, true); mlx5_start_sync_reset_poll(dev); + + if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, + &fw_reset->reset_flags)) + schedule_delayed_work(&fw_reset->reset_timeout_work, + msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE))); return 0; } @@ -728,6 +736,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct } } +static void mlx5_sync_reset_timeout_work(struct work_struct *work) +{ + struct delayed_work *dwork = container_of(work, struct delayed_work, + work); + struct mlx5_fw_reset *fw_reset = + container_of(dwork, struct mlx5_fw_reset, reset_timeout_work); + struct mlx5_core_dev *dev = fw_reset->dev; + + if (mlx5_sync_reset_clear_reset_requested(dev, true)) + return; + mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n"); +} + static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb); @@ -811,6 +832,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev) cancel_work_sync(&fw_reset->reset_reload_work); cancel_work_sync(&fw_reset->reset_now_work); cancel_work_sync(&fw_reset->reset_abort_work); + cancel_delayed_work(&fw_reset->reset_timeout_work); } static const struct devlink_param mlx5_fw_reset_devlink_params[] = { @@ -854,6 +876,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev) INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work); INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event); INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event); + INIT_DELAYED_WORK(&fw_reset->reset_timeout_work, + mlx5_sync_reset_timeout_work); init_completion(&fw_reset->done); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 9bc9bd83c232..cd68c4b2c0bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ 
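The fw_reset hunk above arms a delayed work as a watchdog when a sync reset is requested and cancels it when the request is cleared, except when the clear is running from the watchdog itself. A hedged sketch of that shape (names are illustrative, and the delayed work is assumed to have been set up with INIT_DELAYED_WORK()):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct sketch_reset_state {
	struct delayed_work timeout_work;	/* INIT_DELAYED_WORK() at init */
};

/* Sketch: skip the cancel when the clear runs from the timeout work
 * itself; a work item cannot meaningfully cancel itself.
 */
static void sketch_clear_requested(struct sketch_reset_state *st)
{
	if (current_work() != &st->timeout_work.work)
		cancel_delayed_work(&st->timeout_work);
	/* ...tear down the "reset requested" state here... */
}

static void sketch_reset_timeout_work(struct work_struct *work)
{
	struct sketch_reset_state *st =
		container_of(to_delayed_work(work), struct sketch_reset_state,
			     timeout_work);

	sketch_clear_requested(st);
	pr_warn("reset request timed out\n");
}

static void sketch_set_requested(struct sketch_reset_state *st)
{
	/* ...record the pending request, stop health polling, etc... */
	schedule_delayed_work(&st->timeout_work, msecs_to_jiffies(5000));
}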
b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 func_id; u32 npages; u32 i = 0; + int err; - if (!mlx5_cmd_is_down(dev)) - return mlx5_cmd_do(dev, in, in_size, out, out_size); + err = mlx5_cmd_do(dev, in, in_size, out, out_size); + /* If FW is gone (-ENXIO), proceed to forceful reclaim */ + if (err != -ENXIO) + return err; /* No hard feelings, we want our pages back! */ npages = MLX5_GET(manage_pages_in, in, input_num_entries); diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c index c018783757fb..858d4cbc83d3 100644 --- a/drivers/net/ethernet/mscc/ocelot_stats.c +++ b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -984,6 +984,6 @@ int ocelot_stats_init(struct ocelot *ocelot) void ocelot_stats_deinit(struct ocelot *ocelot) { - cancel_delayed_work(&ocelot->stats_work); + disable_delayed_work_sync(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index fbca8d0efd85..37a46596268a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1789,7 +1789,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)) - return -EOPNOTSUPP; + return 0; return nfp_net_rss_key_sz(nn); } diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 792ddda1ad49..85bd5d845409 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -625,6 +625,21 @@ static void ax88772_suspend(struct usbnet *dev) asix_read_medium_status(dev, 1)); } +/* Notes on PM callbacks and locking context: + * + * - asix_suspend()/asix_resume() are invoked for both runtime PM and + * system-wide suspend/resume. For struct usb_driver the ->resume() + * callback does not receive pm_message_t, so the resume type cannot + * be distinguished here. + * + * - The MAC driver must hold RTNL when calling phylink interfaces such as + * phylink_suspend()/resume(). Those calls will also perform MDIO I/O. + * + * - Taking RTNL and doing MDIO from a runtime-PM resume callback (while + * the USB PM lock is held) is fragile. Since autosuspend brings no + * measurable power saving here, we block it by holding a PM usage + * reference in ax88772_bind(). + */ static int asix_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); @@ -919,6 +934,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) if (ret) goto initphy_err; + /* Keep this interface runtime-PM active by taking a usage ref. + * Prevents runtime suspend while bound and avoids resume paths + * that could deadlock (autoresume under RTNL while USB PM lock + * is held, phylink/MDIO wants RTNL). 
+ */ + pm_runtime_get_noresume(&intf->dev); + return 0; initphy_err: @@ -948,6 +970,8 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) phylink_destroy(priv->phylink); ax88772_mdio_unregister(priv); asix_rx_fixup_common_free(dev->driver_priv); + /* Drop the PM usage ref taken in bind() */ + pm_runtime_put(&intf->dev); } static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -1600,6 +1624,11 @@ static struct usb_driver asix_driver = { .resume = asix_resume, .reset_resume = asix_resume, .disconnect = usbnet_disconnect, + /* usbnet enables autosuspend by default (supports_autosuspend=1). + * We keep runtime-PM active for AX88772* by taking a PM usage + * reference in ax88772_bind() (pm_runtime_get_noresume()) and + * dropping it in unbind(), which effectively blocks autosuspend. + */ .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index ddff6f19ff98..92add3daadbb 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev) rtl8150_t *dev = netdev_priv(netdev); u16 rx_creg = 0x9e; - netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { rx_creg |= 0x0001; dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); @@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev) rx_creg &= 0x00fc; } async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg); - netif_wake_queue(netdev); } static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 09066e6aca40..fdab67a56e43 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1764,33 +1764,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch, int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { + unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ; unsigned long time_left, i; - time_left = wait_for_completion_timeout(&ar->wmi.service_ready, - WMI_SERVICE_READY_TIMEOUT_HZ); - if (!time_left) { - /* Sometimes the PCI HIF doesn't receive interrupt - * for the service ready message even if the buffer - * was completed. PCIe sniffer shows that it's - * because the corresponding CE ring doesn't fires - * it. Workaround here by polling CE rings once. - */ - ath10k_warn(ar, "failed to receive service ready completion, polling..\n"); - + /* Sometimes the PCI HIF doesn't receive interrupt + * for the service ready message even if the buffer + * was completed. PCIe sniffer shows that it's + * because the corresponding CE ring doesn't fires + * it. Workaround here by polling CE rings. Since + * the message could arrive at any time, continue + * polling until timeout. 
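The asix comments above describe blocking USB autosuspend by holding a runtime-PM usage reference for the whole time the interface is bound, rather than supporting a resume path that would need RTNL and MDIO from PM context. A minimal sketch of that pairing for a usbnet-style bind/unbind:

#include <linux/pm_runtime.h>
#include <linux/usb.h>

/* Sketch: pin the interface runtime-PM active while the driver is
 * bound; the matching put in unbind rebalances the usage count.
 */
static int sketch_bind(struct usb_interface *intf)
{
	/* Take a usage reference without triggering a resume; the
	 * device is already active during probe/bind.
	 */
	pm_runtime_get_noresume(&intf->dev);
	return 0;
}

static void sketch_unbind(struct usb_interface *intf)
{
	/* Drop the reference taken in bind so the core may suspend
	 * the interface again after unbind.
	 */
	pm_runtime_put(&intf->dev);
}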
+ */ + do { for (i = 0; i < CE_COUNT; i++) ath10k_hif_send_complete_check(ar, i, 1); + /* The 100 ms granularity is a tradeoff considering scheduler + * overhead and response latency + */ time_left = wait_for_completion_timeout(&ar->wmi.service_ready, - WMI_SERVICE_READY_TIMEOUT_HZ); - if (!time_left) { - ath10k_warn(ar, "polling timed out\n"); - return -ETIMEDOUT; - } - - ath10k_warn(ar, "service ready completion received, continuing normally\n"); - } + msecs_to_jiffies(100)); + if (time_left) + return 0; + } while (time_before(jiffies, timeout)); - return 0; + ath10k_warn(ar, "failed to receive service ready completion\n"); + return -ETIMEDOUT; } int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index bb46ef986b2a..afac4a1e9a1d 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1936,14 +1936,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) mutex_unlock(&ab->core_lock); ath11k_dp_free(ab); - ath11k_hal_srng_deinit(ab); + ath11k_hal_srng_clear(ab); ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; - ret = ath11k_hal_srng_init(ab); - if (ret) - return ret; - clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); ret = ath11k_core_qmi_firmware_ready(ab); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 65e52ab742b4..e47774b6d992 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -1383,6 +1383,22 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab) } EXPORT_SYMBOL(ath11k_hal_srng_deinit); +void ath11k_hal_srng_clear(struct ath11k_base *ab) +{ + /* No need to memset rdp and wrp memory since each individual + * segment would get cleared in ath11k_hal_srng_src_hw_init() + * and ath11k_hal_srng_dst_hw_init(). 
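The ath10k hunk above turns a one-shot wait into a bounded poll: kick the rings, wait up to 100 ms for the completion, and repeat until an overall deadline expires. A sketch of the loop shape, with poll_hw() standing in for the per-iteration workaround:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* poll_hw() is a placeholder for the per-iteration work (here, kicking
 * the copy-engine rings).
 */
static void poll_hw(void);

/* Sketch: wait in 100 ms slices so the workaround polling runs
 * repeatedly, bounded by an overall deadline.
 */
static int sketch_wait_ready(struct completion *ready, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		poll_hw();

		if (wait_for_completion_timeout(ready, msecs_to_jiffies(100)))
			return 0;
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}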
+ */ + memset(ab->hal.srng_list, 0, + sizeof(ab->hal.srng_list)); + memset(ab->hal.shadow_reg_addr, 0, + sizeof(ab->hal.shadow_reg_addr)); + ab->hal.avail_blk_resource = 0; + ab->hal.current_blk_index = 0; + ab->hal.num_shadow_reg_configured = 0; +} +EXPORT_SYMBOL(ath11k_hal_srng_clear); + void ath11k_hal_dump_srng_stats(struct ath11k_base *ab) { struct hal_srng *srng; diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index dc8bbe073017..0f725668b819 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -965,6 +965,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, struct hal_srng_params *params); int ath11k_hal_srng_init(struct ath11k_base *ath11k); void ath11k_hal_srng_deinit(struct ath11k_base *ath11k); +void ath11k_hal_srng_clear(struct ath11k_base *ab); void ath11k_hal_dump_srng_stats(struct ath11k_base *ab); void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab, u32 **cfg, u32 *len); diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c index b66d23d6b2bd..bd21e8fe9c90 100644 --- a/drivers/net/wireless/ath/ath12k/ce.c +++ b/drivers/net/wireless/ath/ath12k/ce.c @@ -388,7 +388,7 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe) } while ((skb = __skb_dequeue(&list))) { - ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n", + ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n", pipe->pipe_num, skb->len); pipe->recv_cb(ab, skb); } diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h index f7005917362c..ea711e02ca03 100644 --- a/drivers/net/wireless/ath/ath12k/debug.h +++ b/drivers/net/wireless/ath/ath12k/debug.h @@ -26,6 +26,7 @@ enum ath12k_debug_mask { ATH12K_DBG_DP_TX = 0x00002000, ATH12K_DBG_DP_RX = 0x00004000, ATH12K_DBG_WOW = 0x00008000, + ATH12K_DBG_CE = 0x00010000, ATH12K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h index 81787501d4a4..11704163876b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h @@ -12,7 +12,6 @@ #include "fw/api/phy.h" #include "fw/api/config.h" #include "fw/api/nvm-reg.h" -#include "fw/img.h" #include "iwl-trans.h" #define BIOS_SAR_MAX_PROFILE_NUM 4 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 59bea82eab29..8801b93eacd4 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -684,10 +684,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy, return; } - /* Don't send world or same regdom info to firmware */ - if (strncmp(request->alpha2, "00", 2) && - strncmp(request->alpha2, adapter->country_code, - sizeof(request->alpha2))) { + /* Don't send same regdom info to firmware */ + if (strncmp(request->alpha2, adapter->country_code, + sizeof(request->alpha2)) != 0) { memcpy(adapter->country_code, request->alpha2, sizeof(request->alpha2)); mwifiex_send_domain_info_cmd_fw(wiphy); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index ec02148a7f1f..2ee8a6e1e310 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -48,7 +48,7 @@ mt76_wmac_probe(struct platform_device *pdev) return 0; error: - 
ieee80211_free_hw(mt76_hw(dev)); + mt76_free_device(mdev); return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h index 509fb43d8a68..2908e8113e48 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h @@ -50,9 +50,9 @@ enum mt7915_eeprom_field { #define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16) #define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16) #define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16) +#define MT_EE_CAL_GROUP_SIZE_7981 (144 * MT_EE_CAL_UNIT + 16) #define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT) #define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT) -#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */ #define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0) #define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6) @@ -179,6 +179,8 @@ mt7915_get_cal_group_size(struct mt7915_dev *dev) val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val); return (val == MT_EE_V2_BAND_SEL_6GHZ) ? MT_EE_CAL_GROUP_SIZE_7916_6G : MT_EE_CAL_GROUP_SIZE_7916; + } else if (is_mt7981(&dev->mt76)) { + return MT_EE_CAL_GROUP_SIZE_7981; } else if (mt7915_check_adie(dev, false)) { return MT_EE_CAL_GROUP_SIZE_7976; } else { @@ -191,8 +193,6 @@ mt7915_get_cal_dpd_size(struct mt7915_dev *dev) { if (is_mt7915(&dev->mt76)) return MT_EE_CAL_DPD_SIZE_V1; - else if (is_mt7981(&dev->mt76)) - return MT_EE_CAL_DPD_SIZE_V2_7981; else return MT_EE_CAL_DPD_SIZE_V2; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 3398c25cb03c..7b481aea76b6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -3004,30 +3004,15 @@ static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw) /* 5G BW160 */ 5250, 5570, 5815 }; - static const u16 freq_list_v2_7981[] = { - /* 5G BW20 */ - 5180, 5200, 5220, 5240, - 5260, 5280, 5300, 5320, - 5500, 5520, 5540, 5560, - 5580, 5600, 5620, 5640, - 5660, 5680, 5700, 5720, - 5745, 5765, 5785, 5805, - 5825, 5845, 5865, 5885, - /* 5G BW160 */ - 5250, 5570, 5815 - }; - const u16 *freq_list = freq_list_v1; - int n_freqs = ARRAY_SIZE(freq_list_v1); - int idx; + const u16 *freq_list; + int idx, n_freqs; if (!is_mt7915(&dev->mt76)) { - if (is_mt7981(&dev->mt76)) { - freq_list = freq_list_v2_7981; - n_freqs = ARRAY_SIZE(freq_list_v2_7981); - } else { - freq_list = freq_list_v2; - n_freqs = ARRAY_SIZE(freq_list_v2); - } + freq_list = freq_list_v2; + n_freqs = ARRAY_SIZE(freq_list_v2); + } else { + freq_list = freq_list_v1; + n_freqs = ARRAY_SIZE(freq_list_v1); } if (freq < 4000) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index e3459295ad88..5be0edb2fe9a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = { /* Netgear, Inc. [A8000,AXE3000] */ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM }, + /* Netgear, Inc. 
A7500 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9065, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM }, /* TP-Link TXE50UH */ { USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM }, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c index 682db1bab21c..a63ff630eaba 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c @@ -12,6 +12,9 @@ static const struct usb_device_id mt7925u_device_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM }, + /* Netgear, Inc. A9000 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9072, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index c55038554114..91b7d35bdb43 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -679,6 +679,7 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev) static int mt7996_wed_rro_init(struct mt7996_dev *dev) { #ifdef CONFIG_NET_MEDIATEK_SOC_WED + u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff); struct mtk_wed_device *wed = &dev->mt76.mmio.wed; u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0; struct mt7996_wed_rro_addr *addr; @@ -718,7 +719,7 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev) addr = dev->wed_rro.addr_elem[i].ptr; for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) { - addr->signature = 0xff; + addr->data = cpu_to_le32(val); addr++; } @@ -736,7 +737,7 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev) dev->wed_rro.session.ptr = ptr; addr = dev->wed_rro.session.ptr; for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) { - addr->signature = 0xff; + addr->data = cpu_to_le32(val); addr++; } @@ -836,6 +837,7 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev) static void mt7996_wed_rro_work(struct work_struct *work) { #ifdef CONFIG_NET_MEDIATEK_SOC_WED + u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff); struct mt7996_dev *dev; LIST_HEAD(list); @@ -872,7 +874,7 @@ static void mt7996_wed_rro_work(struct work_struct *work) MT7996_RRO_WINDOW_MAX_LEN; reset: elem = ptr + elem_id * sizeof(*elem); - elem->signature = 0xff; + elem->data |= cpu_to_le32(val); } mt7996_mcu_wed_rro_reset_sessions(dev, e->id); out: diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 425fd030bee0..29e7289c3a16 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -194,13 +194,12 @@ struct mt7996_hif { int irq; }; +#define WED_RRO_ADDR_SIGNATURE_MASK GENMASK(31, 24) +#define WED_RRO_ADDR_COUNT_MASK GENMASK(14, 4) +#define WED_RRO_ADDR_HEAD_HIGH_MASK GENMASK(3, 0) struct mt7996_wed_rro_addr { - u32 head_low; - u32 head_high : 4; - u32 count: 11; - u32 oor: 1; - u32 rsv : 8; - u32 signature : 8; + __le32 head_low; + __le32 data; }; struct mt7996_wed_rro_session_id { diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c index 04056181368a..dbd05612f2e4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c @@ -132,6 +132,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev, mdev = &dev->mt76; 
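The mt7996 hunks above replace a C bitfield layout shared with hardware by an explicit __le32 word plus GENMASK()/FIELD_PREP() accessors, which keeps the wire format independent of host endianness and compiler bitfield ordering. A hedged sketch of the same idea (field names and widths are illustrative, not the real RRO layout):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define DESC_SIGNATURE	GENMASK(31, 24)
#define DESC_COUNT	GENMASK(14, 4)
#define DESC_HEAD_HIGH	GENMASK(3, 0)

/* Sketch: pack a hardware descriptor word with explicit masks instead
 * of C bitfields.
 */
struct sketch_desc {
	__le32 head_low;
	__le32 data;		/* signature/count/head_high packed here */
};

static void sketch_desc_init(struct sketch_desc *d)
{
	u32 val = FIELD_PREP(DESC_SIGNATURE, 0xff);

	d->data = cpu_to_le32(val);
}

static u32 sketch_desc_count(const struct sketch_desc *d)
{
	return FIELD_GET(DESC_COUNT, le32_to_cpu(d->data));
}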
mt7996_wfsys_reset(dev); hif2 = mt7996_pci_init_hif2(pdev); + dev->hif2 = hif2; ret = mt7996_mmio_wed_init(dev, pdev, false, &irq); if (ret < 0) @@ -156,7 +157,6 @@ static int mt7996_pci_probe(struct pci_dev *pdev, if (hif2) { hif2_dev = container_of(hif2->dev, struct pci_dev, dev); - dev->hif2 = hif2; ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &hif2_irq); if (ret < 0) diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index c0f0e3d71f5f..2a303a758e27 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -207,7 +207,6 @@ static void rtw89_ser_hdl_work(struct work_struct *work) static int ser_send_msg(struct rtw89_ser *ser, u8 event) { - struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); struct ser_msg *msg = NULL; if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags)) @@ -223,7 +222,7 @@ static int ser_send_msg(struct rtw89_ser *ser, u8 event) list_add(&msg->list, &ser->msg_q); spin_unlock_irq(&ser->msg_q_lock); - ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work); + schedule_work(&ser->ser_hdl_work); return 0; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index fdc9f1df0578..ff004350dc2c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3140,10 +3140,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND * because of high power consumption (> 2 Watt) in s2idle * sleep. Only some boards with Intel CPU are affected. + * (Note for testing: Samsung 990 Evo Plus has same PCI ID) */ if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") || dmi_match(DMI_BOARD_NAME, "GMxPXxx") || dmi_match(DMI_BOARD_NAME, "GXxMRXx") || + dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") || dmi_match(DMI_BOARD_NAME, "PH4PG31") || dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") || dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71")) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index ef8c5961e10c..0ade23610ae6 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ int ls_error; struct list_head lsreq_list; /* tgtport->ls_req_list */ bool req_queued; + + struct work_struct put_work; }; @@ -111,8 +113,6 @@ struct nvmet_fc_tgtport { struct nvmet_fc_port_entry *pe; struct kref ref; u32 max_sg_cnt; - - struct work_struct put_work; }; struct nvmet_fc_port_entry { @@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); -static void nvmet_fc_put_tgtport_work(struct work_struct *work) +static void nvmet_fc_put_lsop_work(struct work_struct *work) { - struct nvmet_fc_tgtport *tgtport = - container_of(work, struct nvmet_fc_tgtport, put_work); + struct nvmet_fc_ls_req_op *lsop = + container_of(work, struct nvmet_fc_ls_req_op, put_work); - nvmet_fc_tgtport_put(tgtport); + nvmet_fc_tgtport_put(lsop->tgtport); + kfree(lsop); } static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, @@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) DMA_BIDIRECTIONAL); out_putwork: - queue_work(nvmet_wq, &tgtport->put_work); + queue_work(nvmet_wq, &lsop->put_work); } static int @@ -388,6 +389,7 @@ 
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, lsreq->done = done; lsop->req_queued = false; INIT_LIST_HEAD(&lsop->lsreq_list); + INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work); lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, lsreq->rqstlen + lsreq->rsplen, @@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) __nvmet_fc_finish_ls_req(lsop); /* fc-nvme target doesn't care about success or failure of cmd */ - - kfree(lsop); } /* @@ -1412,7 +1412,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); newrec->max_sg_cnt = template->max_sgl_segments; - INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 9a72f75e5c2d..63b5b435bd3a 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -4191,6 +4191,7 @@ static int of_unittest_pci_node_verify(struct pci_dev *pdev, bool add) unittest(!np, "Child device tree node is not removed\n"); child_dev = device_find_any_child(&pdev->dev); unittest(!child_dev, "Child device is not removed\n"); + put_device(child_dev); } failed: diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index bae829ac759e..c014220d2b24 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -277,6 +277,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) if (!ret) offset = args.args[0]; + /* + * The PCIe Controller's registers have different "reset-values" + * depending on the "strap" settings programmed into the PCIEn_CTRL + * register within the CTRL_MMR memory-mapped register space. + * The registers latch onto a "reset-value" based on the "strap" + * settings sampled after the PCIe Controller is powered on. + * To ensure that the "reset-values" are sampled accurately, power + * off the PCIe Controller before programming the "strap" settings + * and power it on after that. The runtime PM APIs namely + * pm_runtime_put_sync() and pm_runtime_get_sync() will decrement and + * increment the usage counter respectively, causing GENPD to power off + * and power on the PCIe Controller. 
+ */ + ret = pm_runtime_put_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to power off PCIe Controller\n"); + return ret; + } + ret = j721e_pcie_set_mode(pcie, syscon, offset); if (ret < 0) { dev_err(dev, "Failed to set pci mode\n"); @@ -295,6 +314,12 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) return ret; } + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to power on PCIe Controller\n"); + return ret; + } + /* Enable ACSPCIE refclk output if the optional property exists */ syscon = syscon_regmap_lookup_by_phandle_optional(node, "ti,syscon-acspcie-proxy-ctrl"); @@ -531,7 +556,7 @@ static int j721e_pcie_probe(struct platform_device *pdev) ret = j721e_pcie_ctrl_init(pcie); if (ret < 0) { - dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n"); + dev_err_probe(dev, ret, "j721e_pcie_ctrl_init failed\n"); goto err_get_sync; } diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 44b34559de1a..4f75d13fe1de 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -1202,8 +1202,8 @@ static int ks_pcie_probe(struct platform_device *pdev) if (irq < 0) return irq; - ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, - "ks-pcie-error-irq", ks_pcie); + ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED, + "ks-pcie-error-irq", ks_pcie); if (ret < 0) { dev_err(dev, "failed to request error IRQ %d\n", irq); diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index 5d77a0164860..397c2f9477a1 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -182,8 +182,17 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar) return ret; } - if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) + if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) { reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc); + /* + * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr. + * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B) + * indicates that for peripherals in HSC domain, after + * reset has been asserted by writing a matching reset bit + * into register SRCR, it is mandatory to wait 1ms. + */ + fsleep(1000); + } val = readl(rcar->base + PCIEMSR0); if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) { @@ -204,6 +213,19 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar) if (ret) goto err_unprepare; + /* + * Assure the reset is latched and the core is ready for DBI access. + * On R-Car V4H, the PCIe reset is asynchronous and does not take + * effect immediately, but needs a short time to complete. In case + * DBI access happens in that short time, that access generates an + * SError. To make sure that condition can never happen, read back the + * state of the reset, which should turn the asynchronous reset into + * synchronous one, and wait a little over 1ms to add additional + * safety margin. 
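The j721e comment above explains that the controller's register reset values are latched from the strap bits at power-on, so the straps must be programmed while the controller is powered off. The sequence, sketched with program_straps() as a stand-in for the syscon writes:

#include <linux/pm_runtime.h>
#include <linux/device.h>

/* program_straps() stands in for the syscon/regmap writes that set the
 * PCIEn_CTRL strap bits; it is not a real helper.
 */
static int program_straps(struct device *dev);

/* Sketch: power the controller off via runtime PM, program the straps,
 * then power it back on so the new reset values are latched.
 */
static int sketch_reprogram_straps(struct device *dev)
{
	int ret;

	ret = pm_runtime_put_sync(dev);		/* genpd powers the block off */
	if (ret < 0)
		return ret;

	ret = program_straps(dev);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);		/* power it back on */
	return ret < 0 ? ret : 0;
}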
+ */ + reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc); + fsleep(1000); + if (rcar->drvdata->additional_common_init) rcar->drvdata->additional_common_init(rcar); @@ -701,7 +723,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6)); - rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0)); + rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(1, 0)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0); @@ -711,7 +733,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable val &= ~APP_HOLD_PHY_RST; writel(val, rcar->base + PCIERSTCTRL1); - ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000); + ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000); if (ret < 0) return ret; diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index ced3b7e7bdad..c2d626b090e3 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -1205,6 +1205,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; + int err; /* * Controller-5 doesn't need to have its state set by BPMP-FW in @@ -1227,7 +1228,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, msg.rx.data = &resp; msg.rx.size = sizeof(resp); - return tegra_bpmp_transfer(pcie->bpmp, &msg); + err = tegra_bpmp_transfer(pcie->bpmp, &msg); + if (err) + return err; + if (msg.rx.ret) + return -EINVAL; + + return 0; } static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, @@ -1236,6 +1243,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; + int err; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); @@ -1255,7 +1263,13 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, msg.rx.data = &resp; msg.rx.size = sizeof(resp); - return tegra_bpmp_transfer(pcie->bpmp, &msg); + err = tegra_bpmp_transfer(pcie->bpmp, &msg); + if (err) + return err; + if (msg.rx.ret) + return -EINVAL; + + return 0; } static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) @@ -1721,9 +1735,9 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) ret); } - ret = tegra_pcie_bpmp_set_pll_state(pcie, false); + ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); if (ret) - dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); + dev_err(pcie->dev, "Failed to disable controller: %d\n", ret); pcie->ep_state = EP_STATE_DISABLED; dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); @@ -1940,6 +1954,15 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) return IRQ_HANDLED; } +static void tegra_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar; + + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) + dw_pcie_ep_reset_bar(pci, bar); +}; + static int 
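The tegra194 hunks above stop treating a successful tegra_bpmp_transfer() as overall success: the firmware's own status in msg.rx.ret has to be checked too. A generic sketch of that two-level check, with fw_transfer() and struct sketch_fw_msg as assumed stand-ins rather than a real API:

#include <linux/errno.h>

/* fw_transfer() and struct sketch_fw_msg are placeholders for a
 * firmware mailbox API; they do not name a real interface.
 */
struct sketch_fw_msg {
	int ret;	/* status reported by the firmware in the response */
};

static int fw_transfer(struct sketch_fw_msg *msg);

/* Sketch: a transport call can succeed while the firmware still
 * reports a failure in its response, so both levels are checked.
 */
static int sketch_fw_call(struct sketch_fw_msg *msg)
{
	int err = fw_transfer(msg);

	if (err)
		return err;		/* transport-level failure */
	if (msg->ret)
		return -EINVAL;		/* firmware rejected the request */

	return 0;
}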
tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq) { /* Tegra194 supports only INTA */ @@ -1954,10 +1977,10 @@ static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq) static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) { - if (unlikely(irq > 31)) + if (unlikely(irq > 32)) return -EINVAL; - appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1); + appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1); return 0; } @@ -2016,6 +2039,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pcie_ep_ops = { + .init = tegra_pcie_ep_init, .raise_irq = tegra_pcie_ep_raise_irq, .get_features = tegra_pcie_ep_get_features, }; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index d7517c3976e7..927ef421c4fc 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -14,6 +14,7 @@ */ #include <linux/clk.h> +#include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/export.h> @@ -269,7 +270,7 @@ struct tegra_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct mutex map_lock; - spinlock_t mask_lock; + raw_spinlock_t mask_lock; void *virt; dma_addr_t phys; int irq; @@ -1343,7 +1344,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port) unsigned int i; int err; - port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL); + port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL); if (!port->phys) return -ENOMEM; @@ -1604,14 +1605,13 @@ static void tegra_msi_irq_mask(struct irq_data *d) struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; - unsigned long flags; u32 value; - spin_lock_irqsave(&msi->mask_lock, flags); - value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); - value &= ~BIT(d->hwirq % 32); - afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); + value &= ~BIT(d->hwirq % 32); + afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); + } } static void tegra_msi_irq_unmask(struct irq_data *d) @@ -1619,14 +1619,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d) struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; - unsigned long flags; u32 value; - spin_lock_irqsave(&msi->mask_lock, flags); - value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); - value |= BIT(d->hwirq % 32); - afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); + value |= BIT(d->hwirq % 32); + afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); + } } static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -1736,7 +1735,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) int err; mutex_init(&msi->map_lock); - spin_lock_init(&msi->mask_lock); + raw_spin_lock_init(&msi->mask_lock); if (IS_ENABLED(CONFIG_PCI_MSI)) { err = tegra_allocate_domains(msi); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 3dd653f3d784..a90beedd8e24 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -12,6 +12,7 @@ */ #include 
<linux/bitops.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> @@ -37,7 +38,7 @@ struct rcar_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct mutex map_lock; - spinlock_t mask_lock; + raw_spinlock_t mask_lock; int irq1; int irq2; }; @@ -51,20 +52,13 @@ struct rcar_pcie_host { int (*phy_init_fn)(struct rcar_pcie_host *host); }; -static DEFINE_SPINLOCK(pmsr_lock); - static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) { - unsigned long flags; u32 pmsr, val; int ret = 0; - spin_lock_irqsave(&pmsr_lock, flags); - - if (!pcie_base || pm_runtime_suspended(pcie_dev)) { - ret = -EINVAL; - goto unlock_exit; - } + if (!pcie_base || pm_runtime_suspended(pcie_dev)) + return -EINVAL; pmsr = readl(pcie_base + PMSR); @@ -86,8 +80,6 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) writel(L1FAEG | PMEL1RX, pcie_base + PMSR); } -unlock_exit: - spin_unlock_irqrestore(&pmsr_lock, flags); return ret; } @@ -634,28 +626,26 @@ static void rcar_msi_irq_mask(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - unsigned long flags; u32 value; - spin_lock_irqsave(&msi->mask_lock, flags); - value = rcar_pci_read_reg(pcie, PCIEMSIIER); - value &= ~BIT(d->hwirq); - rcar_pci_write_reg(pcie, value, PCIEMSIIER); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = rcar_pci_read_reg(pcie, PCIEMSIIER); + value &= ~BIT(d->hwirq); + rcar_pci_write_reg(pcie, value, PCIEMSIIER); + } } static void rcar_msi_irq_unmask(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - unsigned long flags; u32 value; - spin_lock_irqsave(&msi->mask_lock, flags); - value = rcar_pci_read_reg(pcie, PCIEMSIIER); - value |= BIT(d->hwirq); - rcar_pci_write_reg(pcie, value, PCIEMSIIER); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = rcar_pci_read_reg(pcie, PCIEMSIIER); + value |= BIT(d->hwirq); + rcar_pci_write_reg(pcie, value, PCIEMSIIER); + } } static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -765,7 +755,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) int err; mutex_init(&msi->map_lock); - spin_lock_init(&msi->mask_lock); + raw_spin_lock_init(&msi->mask_lock); err = of_address_to_resource(dev->of_node, 0, &res); if (err) diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index a8ae14474dd0..2e7356c28b5e 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -721,9 +721,10 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | E_ECAM_CR_ENABLE, E_ECAM_CONTROL); - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | - (NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT), - E_ECAM_CONTROL); + ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL); + ecam_val &= ~E_ECAM_SIZE_LOC; + ecam_val |= NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT; + nwl_bridge_writel(pcie, ecam_val, E_ECAM_CONTROL); nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base), E_ECAM_BASE_LO); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 21aa3709e257..eeb7fbc2d67a 100644 --- 
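The pci-tegra and pcie-rcar-host MSI hunks above convert the mask lock to a raw_spinlock_t and use scoped_guard() from cleanup.h so the unlock happens automatically at the end of the block. A sketch of that conversion, with reg_read()/reg_write() and MSI_EN_REG() as placeholder register accessors:

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/bits.h>
#include <linux/types.h>

/* reg_read()/reg_write() and MSI_EN_REG() are placeholders for the
 * controller's register accessors and enable-register layout.
 */
static u32 reg_read(unsigned int reg);
static void reg_write(unsigned int reg, u32 val);
#define MSI_EN_REG(index)	(0x100 + (index) * 4)	/* illustrative */

static DEFINE_RAW_SPINLOCK(sketch_mask_lock);

/* Sketch: read-modify-write the per-vector enable bit under a raw
 * spinlock; the scoped guard drops the lock at the end of the block
 * on every path.
 */
static void sketch_msi_mask(unsigned long hwirq)
{
	u32 value;

	scoped_guard(raw_spinlock_irqsave, &sketch_mask_lock) {
		value = reg_read(MSI_EN_REG(hwirq / 32));
		value &= ~BIT(hwirq % 32);
		reg_write(MSI_EN_REG(hwirq / 32), value);
	}
}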
a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -282,17 +282,20 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) if (!epf_test->dma_supported) return; - dma_release_channel(epf_test->dma_chan_tx); - if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { + if (epf_test->dma_chan_tx) { + dma_release_channel(epf_test->dma_chan_tx); + if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { + epf_test->dma_chan_tx = NULL; + epf_test->dma_chan_rx = NULL; + return; + } epf_test->dma_chan_tx = NULL; - epf_test->dma_chan_rx = NULL; - return; } - dma_release_channel(epf_test->dma_chan_rx); - epf_test->dma_chan_rx = NULL; - - return; + if (epf_test->dma_chan_rx) { + dma_release_channel(epf_test->dma_chan_rx); + epf_test->dma_chan_rx = NULL; + } } static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index aaa33e8dc4c9..44a33df3c69c 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -581,15 +581,18 @@ static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs) if (dev->no_vf_scan) return 0; + pci_lock_rescan_remove(); for (i = 0; i < num_vfs; i++) { rc = pci_iov_add_virtfn(dev, i); if (rc) goto failed; } + pci_unlock_rescan_remove(); return 0; failed: while (i--) pci_iov_remove_virtfn(dev, i); + pci_unlock_rescan_remove(); return rc; } @@ -709,8 +712,10 @@ static void sriov_del_vfs(struct pci_dev *dev) struct pci_sriov *iov = dev->sriov; int i; + pci_lock_rescan_remove(); for (i = 0; i < iov->num_VFs; i++) pci_iov_remove_virtfn(dev, i); + pci_unlock_rescan_remove(); } static void sriov_disable(struct pci_dev *dev) diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 99c58ee09fbb..0cd8a75e2258 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -122,6 +122,8 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge) { + bool ret = false; + if (ACPI_HANDLE(&host_bridge->dev)) { union acpi_object *obj; @@ -135,11 +137,11 @@ bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge) 1, DSM_PCI_PRESERVE_BOOT_CONFIG, NULL, ACPI_TYPE_INTEGER); if (obj && obj->integer.value == 0) - return true; + ret = true; ACPI_FREE(obj); } - return false; + return ret; } /* _HPX PCI Setting Record (Type 0); same as _HPP */ diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 35270172c833..0c3aa91d1aee 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1600,6 +1600,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type) switch (err_type) { case PCI_ERS_RESULT_NONE: case PCI_ERS_RESULT_CAN_RECOVER: + case PCI_ERS_RESULT_NEED_RESET: envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY"; envp[idx++] = "DEVICE_ONLINE=0"; break; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5af4a804a4f8..96f9cf9f8d64 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -200,8 +200,14 @@ static ssize_t max_link_width_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + ssize_t ret; - return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev)); + /* We read PCI_EXP_LNKCAP, so we need the device to be accessible. 
*/ + pci_config_pm_runtime_get(pdev); + ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev)); + pci_config_pm_runtime_put(pdev); + + return ret; } static DEVICE_ATTR_RO(max_link_width); @@ -213,7 +219,10 @@ static ssize_t current_link_speed_show(struct device *dev, int err; enum pci_bus_speed speed; + pci_config_pm_runtime_get(pci_dev); err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL; @@ -230,7 +239,10 @@ static ssize_t current_link_width_show(struct device *dev, u16 linkstat; int err; + pci_config_pm_runtime_get(pci_dev); err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL; @@ -246,7 +258,10 @@ static ssize_t secondary_bus_number_show(struct device *dev, u8 sec_bus; int err; + pci_config_pm_runtime_get(pci_dev); err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL; @@ -262,7 +277,10 @@ static ssize_t subordinate_bus_number_show(struct device *dev, u8 sub_bus; int err; + pci_config_pm_runtime_get(pci_dev); err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL; diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 13b8586924ea..e5cbea3a4968 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -38,7 +38,7 @@ #define AER_ERROR_SOURCES_MAX 128 #define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */ -#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/ +#define AER_MAX_TYPEOF_UNCOR_ERRS 32 /* as per PCI_ERR_UNCOR_STATUS*/ struct aer_err_source { u32 status; /* PCI_ERR_ROOT_STATUS */ @@ -510,11 +510,11 @@ static const char *aer_uncorrectable_error_string[] = { "AtomicOpBlocked", /* Bit Position 24 */ "TLPBlockedErr", /* Bit Position 25 */ "PoisonTLPBlocked", /* Bit Position 26 */ - NULL, /* Bit Position 27 */ - NULL, /* Bit Position 28 */ - NULL, /* Bit Position 29 */ - NULL, /* Bit Position 30 */ - NULL, /* Bit Position 31 */ + "DMWrReqBlocked", /* Bit Position 27 */ + "IDECheck", /* Bit Position 28 */ + "MisIDETLP", /* Bit Position 29 */ + "PCRC_CHECK", /* Bit Position 30 */ + "TLPXlatBlocked", /* Bit Position 31 */ }; static const char *aer_agent_string[] = { diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 31090770fffc..628d192de90b 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -108,6 +108,12 @@ static int report_normal_detected(struct pci_dev *dev, void *data) return report_error_detected(dev, pci_channel_io_normal, data); } +static int report_perm_failure_detected(struct pci_dev *dev, void *data) +{ + pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT); + return 0; +} + static int report_mmio_enabled(struct pci_dev *dev, void *data) { struct pci_driver *pdrv; @@ -269,7 +275,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, failed: pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); - pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); + pci_walk_bridge(bridge, report_perm_failure_detected, NULL); /* TODO: Should kernel panic here? 
*/ pci_info(bridge, "device recovery failed\n"); diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 978b239ec10b..e6b0c8ec9c1f 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -65,7 +65,7 @@ /* PMU registers occupy the 3rd 4KB page of each node's region */ #define CMN_PMU_OFFSET 0x2000 /* ...except when they don't :( */ -#define CMN_S3_DTM_OFFSET 0xa000 +#define CMN_S3_R1_DTM_OFFSET 0xa000 #define CMN_S3_PMU_OFFSET 0xd900 /* For most nodes, this is all there is */ @@ -233,6 +233,9 @@ enum cmn_revision { REV_CMN700_R1P0, REV_CMN700_R2P0, REV_CMN700_R3P0, + REV_CMNS3_R0P0 = 0, + REV_CMNS3_R0P1, + REV_CMNS3_R1P0, REV_CI700_R0P0 = 0, REV_CI700_R1P0, REV_CI700_R2P0, @@ -425,8 +428,8 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn) static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn) { if (cmn->part == PART_CMN_S3) { - if (dn->type == CMN_TYPE_XP) - return CMN_S3_DTM_OFFSET; + if (cmn->rev >= REV_CMNS3_R1P0 && dn->type == CMN_TYPE_XP) + return CMN_S3_R1_DTM_OFFSET; return CMN_S3_PMU_OFFSET; } return CMN_PMU_OFFSET; diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 3569050f9cf3..abd23430dc03 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -96,7 +96,8 @@ struct arm_spe_pmu { #define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu)) /* Convert a free-running index from perf into an SPE buffer offset */ -#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) +#define PERF_IDX2OFF(idx, buf) \ + ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT)) /* Keep track of our dynamic hotplug state */ static enum cpuhp_state arm_spe_pmu_online; diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c index 1ef6d9630f7e..fbaeb7ca600d 100644 --- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c +++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c @@ -122,6 +122,8 @@ struct rockchip_combphy_grfcfg { struct combphy_reg pipe_xpcs_phy_ready; struct combphy_reg pipe_pcie1l0_sel; struct combphy_reg pipe_pcie1l1_sel; + struct combphy_reg u3otg0_port_en; + struct combphy_reg u3otg1_port_en; }; struct rockchip_combphy_cfg { @@ -431,6 +433,14 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv) rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false); rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false); rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true); + switch (priv->id) { + case 0: + rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg0_port_en, true); + break; + case 1: + rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg1_port_en, true); + break; + } break; case PHY_TYPE_SATA: @@ -574,6 +584,8 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = { /* pipe-grf */ .pipe_con0_for_sata = { 0x0000, 15, 0, 0x00, 0x2220 }, .pipe_xpcs_phy_ready = { 0x0040, 2, 2, 0x00, 0x01 }, + .u3otg0_port_en = { 0x0104, 15, 0, 0x0181, 0x1100 }, + .u3otg1_port_en = { 0x0144, 15, 0, 0x0181, 0x1100 }, }; static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = { diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 9171de657f97..a75762e4d264 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -187,6 +187,9 @@ static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 }; static const unsigned int 
i2c_sck_c_dv19_pins[] = { GPIODV_19 }; static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 }; +static const unsigned int i2c_sck_d_pins[] = { GPIOX_11 }; +static const unsigned int i2c_sda_d_pins[] = { GPIOX_10 }; + static const unsigned int eth_mdio_pins[] = { GPIOZ_0 }; static const unsigned int eth_mdc_pins[] = { GPIOZ_1 }; static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 }; @@ -411,6 +414,8 @@ static const struct meson_pmx_group meson_gxl_periphs_groups[] = { GPIO_GROUP(GPIO_TEST_N), /* Bank X */ + GROUP(i2c_sda_d, 5, 5), + GROUP(i2c_sck_d, 5, 4), GROUP(sdio_d0, 5, 31), GROUP(sdio_d1, 5, 30), GROUP(sdio_d2, 5, 29), @@ -651,6 +656,10 @@ static const char * const i2c_c_groups[] = { "i2c_sck_c", "i2c_sda_c", "i2c_sda_c_dv18", "i2c_sck_c_dv19", }; +static const char * const i2c_d_groups[] = { + "i2c_sck_d", "i2c_sda_d", +}; + static const char * const eth_groups[] = { "eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv", "eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3", @@ -777,6 +786,7 @@ static const struct meson_pmx_func meson_gxl_periphs_functions[] = { FUNCTION(i2c_a), FUNCTION(i2c_b), FUNCTION(i2c_c), + FUNCTION(i2c_d), FUNCTION(eth), FUNCTION(pwm_a), FUNCTION(pwm_b), diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 2c31e7f2a27a..56e14193d678 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -337,7 +337,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev, while (selector < nfuncs) { const char *fname = ops->get_function_name(pctldev, selector); - if (!strcmp(function, fname)) + if (fname && !strcmp(function, fname)) return selector; selector++; diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index bde58f5a743c..698ab8cc970a 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -1074,7 +1074,7 @@ static u32 rzg3s_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin) bit = rzg3s_pin_to_oen_bit(pctrl, _pin); if (bit < 0) - return bit; + return 0; return !(readb(pctrl->base + ETH_MODE) & BIT(bit)); } diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c index 29d16c9c1bd1..3a742f74ecd1 100644 --- a/drivers/pinctrl/renesas/pinctrl.c +++ b/drivers/pinctrl/renesas/pinctrl.c @@ -726,7 +726,8 @@ static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group, struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev); const unsigned int *pins; unsigned int num_pins; - unsigned int i, ret; + unsigned int i; + int ret; pins = pmx->pfc->info->groups[group].pins; num_pins = pmx->pfc->info->groups[group].nr_pins; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index 7ffd2e193e42..02b53ae9d9aa 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -393,10 +393,6 @@ extern const struct samsung_pinctrl_of_match_data exynosautov920_of_data; extern const struct samsung_pinctrl_of_match_data fsd_of_data; extern const struct samsung_pinctrl_of_match_data gs101_of_data; extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data; -extern const struct samsung_pinctrl_of_match_data s3c2412_of_data; -extern const struct samsung_pinctrl_of_match_data s3c2416_of_data; -extern const struct samsung_pinctrl_of_match_data s3c2440_of_data; -extern const struct samsung_pinctrl_of_match_data s3c2450_of_data; extern const struct samsung_pinctrl_of_match_data s5pv210_of_data; #endif /* 
__PINCTRL_SAMSUNG_H */ diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c index f63c3c410451..382dff8805c6 100644 --- a/drivers/power/supply/cw2015_battery.c +++ b/drivers/power/supply/cw2015_battery.c @@ -702,8 +702,7 @@ static int cw_bat_probe(struct i2c_client *client) if (!cw_bat->battery_workqueue) return -ENOMEM; - devm_delayed_work_autocancel(&client->dev, - &cw_bat->battery_delay_work, cw_bat_work); + devm_delayed_work_autocancel(&client->dev, &cw_bat->battery_delay_work, cw_bat_work); queue_delayed_work(cw_bat->battery_workqueue, &cw_bat->battery_delay_work, msecs_to_jiffies(10)); return 0; diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c index d7e520da7688..e2424efb266d 100644 --- a/drivers/power/supply/max77976_charger.c +++ b/drivers/power/supply/max77976_charger.c @@ -292,10 +292,10 @@ static int max77976_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_ONLINE: err = max77976_get_online(chg, &val->intval); break; - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: val->intval = MAX77976_CHG_CC_MAX; break; - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: err = max77976_get_integer(chg, CHG_CC, MAX77976_CHG_CC_MIN, MAX77976_CHG_CC_MAX, @@ -330,7 +330,7 @@ static int max77976_set_property(struct power_supply *psy, int err = 0; switch (psp) { - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: err = max77976_set_integer(chg, CHG_CC, MAX77976_CHG_CC_MIN, MAX77976_CHG_CC_MAX, @@ -355,7 +355,7 @@ static int max77976_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { switch (psp) { - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: return true; default: @@ -368,8 +368,8 @@ static enum power_supply_property max77976_psy_props[] = { POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_ONLINE, - POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, - POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index 92d1b62ea239..e9389876229e 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c @@ -109,16 +109,13 @@ struct pps_device *pps_register_source(struct pps_source_info *info, if (err < 0) { pr_err("%s: unable to create char device\n", info->name); - goto kfree_pps; + goto pps_register_source_exit; } dev_dbg(&pps->dev, "new PPS source %s\n", info->name); return pps; -kfree_pps: - kfree(pps); - pps_register_source_exit: pr_err("%s: unable to register source\n", info->name); diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 9463232af8d2..c6b8b6478276 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -374,6 +374,7 @@ int pps_register_cdev(struct pps_device *pps) pps->info.name); err = -EBUSY; } + kfree(pps); goto out_unlock; } pps->id = err; @@ -383,13 +384,11 @@ int pps_register_cdev(struct pps_device *pps) pps->dev.devt = MKDEV(pps_major, pps->id); dev_set_drvdata(&pps->dev, pps); dev_set_name(&pps->dev, "pps%d", pps->id); + pps->dev.release = pps_device_destruct; err = device_register(&pps->dev); if (err) goto free_idr; - /* Override the release function with our own 
*/ - pps->dev.release = pps_device_destruct; - pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major, pps->id); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index b352df4cd3f9..f329263f33aa 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -22,6 +22,7 @@ #define PTP_MAX_TIMESTAMPS 128 #define PTP_BUF_TIMESTAMPS 30 #define PTP_DEFAULT_MAX_VCLOCKS 20 +#define PTP_MAX_VCLOCKS_LIMIT (KMALLOC_MAX_SIZE/(sizeof(int))) #define PTP_MAX_CHANNELS 2048 enum { diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 6b1b8f57cd95..200eaf500696 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -284,7 +284,7 @@ static ssize_t max_vclocks_store(struct device *dev, size_t size; u32 max; - if (kstrtou32(buf, 0, &max) || max == 0) + if (kstrtou32(buf, 0, &max) || max == 0 || max > PTP_MAX_VCLOCKS_LIMIT) return -EINVAL; if (max == ptp->max_vclocks) diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 831aed228caf..858d36991374 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -234,7 +234,7 @@ static int berlin_pwm_suspend(struct device *dev) for (i = 0; i < chip->npwm; i++) { struct berlin_pwm_channel *channel = &bpc->channel[i]; - channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE); + channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_EN); channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL); channel->duty = berlin_pwm_readl(bpc, i, BERLIN_PWM_DUTY); channel->tcnt = berlin_pwm_readl(bpc, i, BERLIN_PWM_TCNT); @@ -262,7 +262,7 @@ static int berlin_pwm_resume(struct device *dev) berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL); berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY); berlin_pwm_writel(bpc, i, channel->tcnt, BERLIN_PWM_TCNT); - berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_ENABLE); + berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_EN); } return 0; diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index 0125e73b98df..7a86cb090f76 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -36,7 +36,7 @@ #define CLKDIV_MAX 7 #define HSPCLKDIV_MAX 7 -#define PERIOD_MAX 0xFFFF +#define PERIOD_MAX 0x10000 /* compare module registers */ #define CMPA 0x12 @@ -65,14 +65,10 @@ #define AQCTL_ZRO_FRCHIGH BIT(1) #define AQCTL_ZRO_FRCTOGGLE (BIT(1) | BIT(0)) -#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_PRD_FRCHIGH | \ - AQCTL_ZRO_FRCHIGH) -#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_PRD_FRCLOW | \ - AQCTL_ZRO_FRCLOW) -#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_PRD_FRCHIGH | \ - AQCTL_ZRO_FRCHIGH) -#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_PRD_FRCLOW | \ - AQCTL_ZRO_FRCLOW) +#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_ZRO_FRCHIGH) +#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_ZRO_FRCLOW) +#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_ZRO_FRCHIGH) +#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_ZRO_FRCLOW) #define AQSFRC_RLDCSF_MASK (BIT(7) | BIT(6)) #define AQSFRC_RLDCSF_ZRO 0 @@ -108,7 +104,6 @@ struct ehrpwm_pwm_chip { unsigned long clk_rate; void __iomem *mmio_base; unsigned long period_cycles[NUM_PWM_CHANNEL]; - enum pwm_polarity polarity[NUM_PWM_CHANNEL]; struct clk *tbclk; struct ehrpwm_context ctx; }; @@ -166,7 +161,7 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div, *prescale_div = (1 << clkdiv) * (hspclkdiv ? 
(hspclkdiv * 2) : 1); - if (*prescale_div > rqst_prescaler) { + if (*prescale_div >= rqst_prescaler) { *tb_clk_div = (clkdiv << TBCTL_CLKDIV_SHIFT) | (hspclkdiv << TBCTL_HSPCLKDIV_SHIFT); return 0; @@ -177,51 +172,20 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div, return 1; } -static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan) -{ - u16 aqctl_val, aqctl_mask; - unsigned int aqctl_reg; - - /* - * Configure PWM output to HIGH/LOW level on counter - * reaches compare register value and LOW/HIGH level - * on counter value reaches period register value and - * zero value on counter - */ - if (chan == 1) { - aqctl_reg = AQCTLB; - aqctl_mask = AQCTL_CBU_MASK; - - if (pc->polarity[chan] == PWM_POLARITY_INVERSED) - aqctl_val = AQCTL_CHANB_POLINVERSED; - else - aqctl_val = AQCTL_CHANB_POLNORMAL; - } else { - aqctl_reg = AQCTLA; - aqctl_mask = AQCTL_CAU_MASK; - - if (pc->polarity[chan] == PWM_POLARITY_INVERSED) - aqctl_val = AQCTL_CHANA_POLINVERSED; - else - aqctl_val = AQCTL_CHANA_POLNORMAL; - } - - aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK; - ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val); -} - /* * period_ns = 10^9 * (ps_divval * period_cycles) / PWM_CLK_RATE * duty_ns = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE */ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - u64 duty_ns, u64 period_ns) + u64 duty_ns, u64 period_ns, enum pwm_polarity polarity) { struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); u32 period_cycles, duty_cycles; u16 ps_divval, tb_divval; unsigned int i, cmp_reg; unsigned long long c; + u16 aqctl_val, aqctl_mask; + unsigned int aqctl_reg; if (period_ns > NSEC_PER_SEC) return -ERANGE; @@ -231,15 +195,10 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, do_div(c, NSEC_PER_SEC); period_cycles = (unsigned long)c; - if (period_cycles < 1) { - period_cycles = 1; - duty_cycles = 1; - } else { - c = pc->clk_rate; - c = c * duty_ns; - do_div(c, NSEC_PER_SEC); - duty_cycles = (unsigned long)c; - } + c = pc->clk_rate; + c = c * duty_ns; + do_div(c, NSEC_PER_SEC); + duty_cycles = (unsigned long)c; /* * Period values should be same for multiple PWM channels as IP uses @@ -265,52 +224,73 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, pc->period_cycles[pwm->hwpwm] = period_cycles; /* Configure clock prescaler to support Low frequency PWM wave */ - if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval, + if (set_prescale_div(DIV_ROUND_UP(period_cycles, PERIOD_MAX), &ps_divval, &tb_divval)) { dev_err(pwmchip_parent(chip), "Unsupported values\n"); return -EINVAL; } - pm_runtime_get_sync(pwmchip_parent(chip)); - - /* Update clock prescaler values */ - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval); - /* Update period & duty cycle with presacler division */ period_cycles = period_cycles / ps_divval; duty_cycles = duty_cycles / ps_divval; - /* Configure shadow loading on Period register */ - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW); + if (period_cycles < 1) + period_cycles = 1; - ehrpwm_write(pc->mmio_base, TBPRD, period_cycles); + pm_runtime_get_sync(pwmchip_parent(chip)); - /* Configure ehrpwm counter for up-count mode */ - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK, - TBCTL_CTRMODE_UP); + /* Update clock prescaler values */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval); - if (pwm->hwpwm == 1) + if (pwm->hwpwm == 1) { /* Channel 1 configured with 
compare B register */ cmp_reg = CMPB; - else + + aqctl_reg = AQCTLB; + aqctl_mask = AQCTL_CBU_MASK; + + if (polarity == PWM_POLARITY_INVERSED) + aqctl_val = AQCTL_CHANB_POLINVERSED; + else + aqctl_val = AQCTL_CHANB_POLNORMAL; + + /* if duty_cycle is big, don't toggle on CBU */ + if (duty_cycles > period_cycles) + aqctl_val &= ~AQCTL_CBU_MASK; + + } else { /* Channel 0 configured with compare A register */ cmp_reg = CMPA; - ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles); + aqctl_reg = AQCTLA; + aqctl_mask = AQCTL_CAU_MASK; - pm_runtime_put_sync(pwmchip_parent(chip)); + if (polarity == PWM_POLARITY_INVERSED) + aqctl_val = AQCTL_CHANA_POLINVERSED; + else + aqctl_val = AQCTL_CHANA_POLNORMAL; - return 0; -} + /* if duty_cycle is big, don't toggle on CAU */ + if (duty_cycles > period_cycles) + aqctl_val &= ~AQCTL_CAU_MASK; + } -static int ehrpwm_pwm_set_polarity(struct pwm_chip *chip, - struct pwm_device *pwm, - enum pwm_polarity polarity) -{ - struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); + aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK; + ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val); + + /* Configure shadow loading on Period register */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW); + + ehrpwm_write(pc->mmio_base, TBPRD, period_cycles - 1); - /* Configuration of polarity in hardware delayed, do at enable */ - pc->polarity[pwm->hwpwm] = polarity; + /* Configure ehrpwm counter for up-count mode */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK, + TBCTL_CTRMODE_UP); + + if (!(duty_cycles > period_cycles)) + ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles); + + pm_runtime_put_sync(pwmchip_parent(chip)); return 0; } @@ -339,9 +319,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); - /* Channels polarity can be configured from action qualifier module */ - configure_polarity(pc, pwm->hwpwm); - /* Enable TBCLK */ ret = clk_enable(pc->tbclk); if (ret) { @@ -391,12 +368,7 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); - if (pwm_is_enabled(pwm)) { - dev_warn(pwmchip_parent(chip), "Removing PWM device without disabling\n"); - pm_runtime_put_sync(pwmchip_parent(chip)); - } - - /* set period value to zero on free */ + /* Don't let a pwm without consumer block requests to the other channel */ pc->period_cycles[pwm->hwpwm] = 0; } @@ -411,10 +383,6 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, ehrpwm_pwm_disable(chip, pwm); enabled = false; } - - err = ehrpwm_pwm_set_polarity(chip, pwm, state->polarity); - if (err) - return err; } if (!state->enabled) { @@ -423,7 +391,7 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period); + err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period, state->polarity); if (err) return err; diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c index 9df726f10ad1..6d609c42e479 100644 --- a/drivers/regulator/scmi-regulator.c +++ b/drivers/regulator/scmi-regulator.c @@ -257,7 +257,8 @@ static int process_scmi_regulator_of_node(struct scmi_device *sdev, struct device_node *np, struct scmi_regulator_info *rinfo) { - u32 dom, ret; + u32 dom; + int ret; ret = of_property_read_u32(np, "reg", &dom); if (ret) diff --git a/drivers/remoteproc/pru_rproc.c 
b/drivers/remoteproc/pru_rproc.c index 327f0c7ee3d6..7ffbae209a31 100644 --- a/drivers/remoteproc/pru_rproc.c +++ b/drivers/remoteproc/pru_rproc.c @@ -340,7 +340,7 @@ EXPORT_SYMBOL_GPL(pru_rproc_put); */ int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr) { - struct pru_rproc *pru = rproc->priv; + struct pru_rproc *pru; unsigned int reg; u32 mask, set; u16 idx; @@ -352,6 +352,7 @@ int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr) if (!rproc->dev.parent || !is_pru_rproc(rproc->dev.parent)) return -ENODEV; + pru = rproc->priv; /* pointer is 16 bit and index is 8-bit so mask out the rest */ idx_mask = (c >= PRU_C28) ? 0xFFFF : 0xFF; diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c index 4ee5e67a9f03..769c6d6d6a73 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -156,9 +156,6 @@ int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout) int ret; ret = wait_for_completion_timeout(&q6v5->start_done, timeout); - if (!ret) - disable_irq(q6v5->handover_irq); - return !ret ? -ETIMEDOUT : 0; } EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index aaf76406cd7d..39db12f267cc 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -443,6 +443,29 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) else err = rtc->ops->set_alarm(rtc->dev.parent, alarm); + /* + * Check for potential race described above. If the waiting for next + * second, and the second just ticked since the check above, either + * + * 1) It ticked after the alarm was set, and an alarm irq should be + * generated. + * + * 2) It ticked before the alarm was set, and alarm irq most likely will + * not be generated. + * + * While we cannot easily check for which of these two scenarios we + * are in, we can return -ETIME to signal that the timer has already + * expired, which is true in both cases. 
+ */ + if ((scheduled - now) <= 1) { + err = __rtc_read_time(rtc, &tm); + if (err) + return err; + now = rtc_tm_to_time64(&tm); + if (scheduled <= now) + return -ETIME; + } + trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err); return err; } @@ -594,6 +617,10 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); rtc->uie_rtctimer.period = ktime_set(1, 0); err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); + if (!err && rtc->ops && rtc->ops->alarm_irq_enable) + err = rtc->ops->alarm_irq_enable(rtc->dev.parent, 1); + if (err) + goto out; } else { rtc_timer_remove(rtc, &rtc->uie_rtctimer); } diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c index 9f8b5d4a8f6b..6b77c122fdc1 100644 --- a/drivers/rtc/rtc-optee.c +++ b/drivers/rtc/rtc-optee.c @@ -320,6 +320,7 @@ static int optee_rtc_remove(struct device *dev) { struct optee_rtc *priv = dev_get_drvdata(dev); + tee_shm_free(priv->shm); tee_client_close_session(priv->ctx, priv->session_id); tee_client_close_context(priv->ctx); diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c index 4bcd7ca32f27..b8a0fccef14e 100644 --- a/drivers/rtc/rtc-x1205.c +++ b/drivers/rtc/rtc-x1205.c @@ -669,7 +669,7 @@ static const struct i2c_device_id x1205_id[] = { MODULE_DEVICE_TABLE(i2c, x1205_id); static const struct of_device_id x1205_dt_ids[] = { - { .compatible = "xircom,x1205", }, + { .compatible = "xicor,x1205", }, {}, }; MODULE_DEVICE_TABLE(of, x1205_dt_ids); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 42a4a996defb..93f346d313e9 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -332,6 +332,11 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) lim.max_dev_sectors = device->discipline->max_sectors(block); lim.max_hw_sectors = lim.max_dev_sectors; lim.logical_block_size = block->bp_block; + /* + * Adjust dma_alignment to match block_size - 1 + * to ensure proper buffer alignment checks in the block layer. 
+ */ + lim.dma_alignment = lim.logical_block_size - 1; if (device->discipline->has_discard) { unsigned int max_bytes; @@ -3112,12 +3117,14 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, PTR_ERR(cqr) == -ENOMEM || PTR_ERR(cqr) == -EAGAIN) { rc = BLK_STS_RESOURCE; - goto out; + } else if (PTR_ERR(cqr) == -EINVAL) { + rc = BLK_STS_INVAL; + } else { + DBF_DEV_EVENT(DBF_ERR, basedev, + "CCW creation failed (rc=%ld) on request %p", + PTR_ERR(cqr), req); + rc = BLK_STS_IOERR; } - DBF_DEV_EVENT(DBF_ERR, basedev, - "CCW creation failed (rc=%ld) on request %p", - PTR_ERR(cqr), req); - rc = BLK_STS_IOERR; goto out; } /* diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 9498825d9c7a..858a9e171351 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1318,23 +1318,34 @@ void ccw_device_schedule_recovery(void) spin_unlock_irqrestore(&recovery_lock, flags); } -static int purge_fn(struct device *dev, void *data) +static int purge_fn(struct subchannel *sch, void *data) { - struct ccw_device *cdev = to_ccwdev(dev); - struct ccw_dev_id *id = &cdev->private->dev_id; - struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_device *cdev; - spin_lock_irq(cdev->ccwlock); - if (is_blacklisted(id->ssid, id->devno) && - (cdev->private->state == DEV_STATE_OFFLINE) && - (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { - CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, - id->devno); + spin_lock_irq(&sch->lock); + if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv) + goto unlock; + + if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) + goto unlock; + + cdev = sch_get_cdev(sch); + if (cdev) { + if (cdev->private->state != DEV_STATE_OFFLINE) + goto unlock; + + if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) + goto unlock; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); - css_sched_sch_todo(sch, SCH_TODO_UNREG); atomic_set(&cdev->private->onoff, 0); } - spin_unlock_irq(cdev->ccwlock); + + css_sched_sch_todo(sch, SCH_TODO_UNREG); + CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid, + sch->schib.pmcw.dev, cdev ? "" : " (no cdev)"); + +unlock: + spin_unlock_irq(&sch->lock); /* Abort loop in case of pending signal. */ if (signal_pending(current)) return -EINTR; @@ -1350,7 +1361,7 @@ static int purge_fn(struct device *dev, void *data) int ccw_purge_blacklisted(void) { CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n"); - bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn); + for_each_subchannel_staged(purge_fn, NULL, NULL); return 0; } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 0c49414c1f35..6cb6586f4790 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6528,18 +6528,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, while (left) { sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; buff_size[sg_used] = sz; - buff[sg_used] = kmalloc(sz, GFP_KERNEL); - if (buff[sg_used] == NULL) { - status = -ENOMEM; - goto cleanup1; - } + if (ioc->Request.Type.Direction & XFER_WRITE) { - if (copy_from_user(buff[sg_used], data_ptr, sz)) { - status = -EFAULT; + buff[sg_used] = memdup_user(data_ptr, sz); + if (IS_ERR(buff[sg_used])) { + status = PTR_ERR(buff[sg_used]); goto cleanup1; } - } else - memset(buff[sg_used], 0, sz); + } else { + buff[sg_used] = kzalloc(sz, GFP_KERNEL); + if (!buff[sg_used]) { + status = -ENOMEM; + goto cleanup1; + } + } + left -= sz; data_ptr += sz; sg_used++; diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index d84413b77d84..421db8996927 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -987,11 +987,9 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, list_for_each_entry_safe(mpt3sas_phy, next_phy, &mpt3sas_port->phy_list, port_siblings) { if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) - dev_printk(KERN_INFO, &mpt3sas_port->port->dev, - "remove: sas_addr(0x%016llx), phy(%d)\n", - (unsigned long long) - mpt3sas_port->remote_identify.sas_address, - mpt3sas_phy->phy_id); + ioc_info(ioc, "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); mpt3sas_phy->phy_belongs_to_port = 0; if (!ioc->remove_host) sas_port_delete_phy(mpt3sas_port->port, diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 020037cbf0d9..655cca5fe7cc 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -124,7 +124,7 @@ static void mvs_free(struct mvs_info *mvi) if (mvi->shost) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) - cancel_delayed_work(&mwq->work_q); + cancel_delayed_work_sync(&mwq->work_q); kfree(mvi->rsvd_tags); kfree(mvi); } diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 1469d0c54e45..5a02fd3bc6c9 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -498,14 +498,14 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, /* Temporary dma mapping, used only in the scope of this function */ mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox), &mbox_addr, GFP_KERNEL); - if (dma_mapping_error(&pdev->dev, mbox_addr)) + if (!mbox) return false; /* These are the base addresses for the command memory mailbox array */ cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox); cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size, &cs->cmd_mbox_addr, GFP_KERNEL); - if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) { + if (!cmd_mbox) { dev_err(&pdev->dev, "Failed to map command mailbox\n"); goto out_free; } @@ -520,7 +520,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox); stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size, &cs->stat_mbox_addr, GFP_KERNEL); - if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) { + if (!stat_mbox) { dev_err(&pdev->dev, "Failed to map status mailbox\n"); goto out_free; } @@ -533,7 +533,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, cs->fwstat_buf = dma_alloc_coherent(&pdev->dev, sizeof(struct myrs_fwstat), &cs->fwstat_addr, GFP_KERNEL); - if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) { + if (!cs->fwstat_buf) { dev_err(&pdev->dev, "Failed to map firmware health buffer\n"); 
cs->fwstat_buf = NULL; goto out_free; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index a9d6dac41334..4daab8b6d675 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -703,6 +703,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev) unsigned long flags = 0; struct pm8001_hba_info *pm8001_ha; struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct domain_device *parent_dev = dev->parent; pm8001_ha = pm8001_find_ha_by_dev(dev); spin_lock_irqsave(&pm8001_ha->lock, flags); @@ -719,7 +720,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev) spin_lock_irqsave(&pm8001_ha->lock, flags); } PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); - pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0; + + /* + * The phy array only contains local phys. Thus, we cannot clear + * phy_attached for a device behind an expander. + */ + if (!(parent_dev && dev_is_expander(parent_dev->dev_type))) + pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0; pm8001_free_dev(pm8001_dev); } else { pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n"); diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index dcde55c8ee5d..be20e2c457b8 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -1797,7 +1797,7 @@ retry: switch (rval) { case QLA_SUCCESS: break; - case EAGAIN: + case -EAGAIN: msleep(EDIF_MSLEEP_INTERVAL); cnt++; if (cnt < EDIF_RETRY_COUNT) @@ -3648,7 +3648,7 @@ retry: p->e.extra_rx_xchg_address, p->e.extra_control_flags, sp->handle, sp->remap.req.len, bsg_job); break; - case EAGAIN: + case -EAGAIN: msleep(EDIF_MSLEEP_INTERVAL); cnt++; if (cnt < EDIF_RETRY_COUNT) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 79cdfec2bca3..8bd4aa935e22 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2059,11 +2059,11 @@ static void qla_marker_sp_done(srb_t *sp, int res) int cnt = 5; \ do { \ if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\ - _rval = EINVAL; \ + _rval = -EINVAL; \ break; \ } \ _rval = qla2x00_start_sp(_sp); \ - if (_rval == EAGAIN) \ + if (_rval == -EAGAIN) \ msleep(1); \ else \ break; \ diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 8ee2e337c9e1..316594aa40cc 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -419,7 +419,7 @@ retry: switch (rval) { case QLA_SUCCESS: break; - case EAGAIN: + case -EAGAIN: msleep(PURLS_MSLEEP_INTERVAL); cnt++; if (cnt < PURLS_RETRY_COUNT) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index ee1d5dec3bc6..3745cf856917 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3695,10 +3695,10 @@ static int sd_revalidate_disk(struct gendisk *disk) struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; sector_t old_capacity = sdkp->capacity; - struct queue_limits lim; - unsigned char *buffer; + struct queue_limits *lim = NULL; + unsigned char *buffer = NULL; unsigned int dev_max; - int err; + int err = 0; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_revalidate_disk\n")); @@ -3710,6 +3710,10 @@ static int sd_revalidate_disk(struct gendisk *disk) if (!scsi_device_online(sdp)) goto out; + lim = kmalloc(sizeof(*lim), GFP_KERNEL); + if (!lim) + goto out; + buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); if (!buffer) { sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " @@ -3719,14 +3723,14 @@ static 
int sd_revalidate_disk(struct gendisk *disk) sd_spinup_disk(sdkp); - lim = queue_limits_start_update(sdkp->disk->queue); + *lim = queue_limits_start_update(sdkp->disk->queue); /* * Without media there is no reason to ask; moreover, some devices * react badly if we do. */ if (sdkp->media_present) { - sd_read_capacity(sdkp, &lim, buffer); + sd_read_capacity(sdkp, lim, buffer); /* * Some USB/UAS devices return generic values for mode pages * until the media has been accessed. Trigger a READ operation @@ -3740,17 +3744,17 @@ static int sd_revalidate_disk(struct gendisk *disk) * cause this to be updated correctly and any device which * doesn't support it should be treated as rotational. */ - lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); + lim->features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); - sd_read_block_limits(sdkp, &lim); + sd_read_block_limits(sdkp, lim); sd_read_block_limits_ext(sdkp); - sd_read_block_characteristics(sdkp, &lim); - sd_zbc_read_zones(sdkp, &lim, buffer); + sd_read_block_characteristics(sdkp, lim); + sd_zbc_read_zones(sdkp, lim, buffer); } - sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp)); + sd_config_discard(sdkp, lim, sd_discard_mode(sdkp)); sd_print_capacity(sdkp, old_capacity); @@ -3760,47 +3764,46 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_app_tag_own(sdkp, buffer); sd_read_write_same(sdkp, buffer); sd_read_security(sdkp, buffer); - sd_config_protection(sdkp, &lim); + sd_config_protection(sdkp, lim); } /* * We now have all cache related info, determine how we deal * with flush requests. */ - sd_set_flush_flag(sdkp, &lim); + sd_set_flush_flag(sdkp, lim); /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; /* Some devices report a maximum block count for READ/WRITE requests. */ dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); - lim.max_dev_sectors = logical_to_sectors(sdp, dev_max); + lim->max_dev_sectors = logical_to_sectors(sdp, dev_max); if (sd_validate_min_xfer_size(sdkp)) - lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks); + lim->io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks); else - lim.io_min = 0; + lim->io_min = 0; /* * Limit default to SCSI host optimal sector limit if set. There may be * an impact on performance for when the size of a request exceeds this * host limit. 
*/ - lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT; + lim->io_opt = sdp->host->opt_sectors << SECTOR_SHIFT; if (sd_validate_opt_xfer_size(sdkp, dev_max)) { - lim.io_opt = min_not_zero(lim.io_opt, + lim->io_opt = min_not_zero(lim->io_opt, logical_to_bytes(sdp, sdkp->opt_xfer_blocks)); } sdkp->first_scan = 0; set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); - sd_config_write_same(sdkp, &lim); - kfree(buffer); + sd_config_write_same(sdkp, lim); - err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim); + err = queue_limits_commit_update_frozen(sdkp->disk->queue, lim); if (err) - return err; + goto out; /* * Query concurrent positioning ranges after @@ -3819,7 +3822,10 @@ static int sd_revalidate_disk(struct gendisk *disk) set_capacity_and_notify(disk, 0); out: - return 0; + kfree(buffer); + kfree(lim); + + return err; } /** diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c index 9a91298c1253..4cb8169aec6b 100644 --- a/drivers/soc/mediatek/mtk-svs.c +++ b/drivers/soc/mediatek/mtk-svs.c @@ -2167,10 +2167,18 @@ static struct device *svs_add_device_link(struct svs_platform *svsp, return dev; } +static void svs_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + static int svs_mt8192_platform_probe(struct svs_platform *svsp) { struct device *dev; u32 idx; + int ret; svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst"); if (IS_ERR(svsp->rst)) @@ -2181,6 +2189,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp) if (IS_ERR(dev)) return dev_err_probe(svsp->dev, PTR_ERR(dev), "failed to get lvts device\n"); + put_device(dev); for (idx = 0; idx < svsp->bank_max; idx++) { struct svs_bank *svsb = &svsp->banks[idx]; @@ -2190,6 +2199,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp) case SVSB_SWID_CPU_LITTLE: case SVSB_SWID_CPU_BIG: svsb->opp_dev = get_cpu_device(bdata->cpu_id); + get_device(svsb->opp_dev); break; case SVSB_SWID_CCI: svsb->opp_dev = svs_add_device_link(svsp, "cci"); @@ -2209,6 +2219,11 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp) return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), "failed to get OPP device for bank %d\n", idx); + + ret = devm_add_action_or_reset(svsp->dev, svs_put_device, + svsb->opp_dev); + if (ret) + return ret; } return 0; @@ -2218,11 +2233,13 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp) { struct device *dev; u32 idx; + int ret; dev = svs_add_device_link(svsp, "thermal-sensor"); if (IS_ERR(dev)) return dev_err_probe(svsp->dev, PTR_ERR(dev), "failed to get thermal device\n"); + put_device(dev); for (idx = 0; idx < svsp->bank_max; idx++) { struct svs_bank *svsb = &svsp->banks[idx]; @@ -2232,6 +2249,7 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp) case SVSB_SWID_CPU_LITTLE: case SVSB_SWID_CPU_BIG: svsb->opp_dev = get_cpu_device(bdata->cpu_id); + get_device(svsb->opp_dev); break; case SVSB_SWID_CCI: svsb->opp_dev = svs_add_device_link(svsp, "cci"); @@ -2248,6 +2266,11 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp) return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), "failed to get OPP device for bank %d\n", idx); + + ret = devm_add_action_or_reset(svsp->dev, svs_put_device, + svsb->opp_dev); + if (ret) + return ret; } return 0; diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index 641f29a98cbd..cc72a31a450e 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -453,13 +453,10 @@ static irqreturn_t 
tcs_tx_done(int irq, void *p) trace_rpmh_tx_done(drv, i, req); - /* - * If wake tcs was re-purposed for sending active - * votes, clear AMC trigger & enable modes and + /* Clear AMC trigger & enable modes and * disable interrupt for this TCS */ - if (!drv->tcs[ACTIVE_TCS].num_tcs) - __tcs_set_trigger(drv, i, false); + __tcs_set_trigger(drv, i, false); skip: /* Reclaim the TCS */ write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 56be0b6901a8..06e43b184d85 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -712,6 +712,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata, reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; reg |= (op->addr.nbytes - 1); writel(reg, reg_base + CQSPI_REG_SIZE); + readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ return 0; } @@ -754,6 +755,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, reinit_completion(&cqspi->transfer_complete); writel(CQSPI_REG_INDIRECTRD_START_MASK, reg_base + CQSPI_REG_INDIRECTRD); + readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */ while (remaining > 0) { if (use_irq && @@ -1033,6 +1035,7 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; reg |= (op->addr.nbytes - 1); writel(reg, reg_base + CQSPI_REG_SIZE); + readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ return 0; } @@ -1058,6 +1061,8 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata, reinit_completion(&cqspi->transfer_complete); writel(CQSPI_REG_INDIRECTWR_START_MASK, reg_base + CQSPI_REG_INDIRECTWR); + readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */ + /* * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access * Controller programming sequence, couple of cycles of @@ -1668,12 +1673,10 @@ static const struct spi_controller_mem_caps cqspi_mem_caps = { static int cqspi_setup_flash(struct cqspi_st *cqspi) { - unsigned int max_cs = cqspi->num_chipselect - 1; struct platform_device *pdev = cqspi->pdev; struct device *dev = &pdev->dev; struct cqspi_flash_pdata *f_pdata; - unsigned int cs; - int ret; + int ret, cs, max_cs = -1; /* Get flash device data */ for_each_available_child_of_node_scoped(dev->of_node, np) { @@ -1686,10 +1689,10 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi) if (cs >= cqspi->num_chipselect) { dev_err(dev, "Chip select %d out of range.\n", cs); return -EINVAL; - } else if (cs < max_cs) { - max_cs = cs; } + max_cs = max_t(int, cs, max_cs); + f_pdata = &cqspi->f_pdata[cs]; f_pdata->cqspi = cqspi; f_pdata->cs = cs; @@ -1699,6 +1702,11 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi) return ret; } + if (max_cs < 0) { + dev_err(dev, "No flash device declared\n"); + return -ENODEV; + } + cqspi->num_chipselect = max_cs + 1; return 0; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8d6341b0d866..5ad9f4a2148f 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2462,7 +2462,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, if (rc > ctlr->num_chipselect) { dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n", nc, rc); - return rc; + return -EINVAL; } if ((of_property_read_bool(nc, "parallel-memories")) && (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) { diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 2a7d253d9c55..8e50476eb71f 100644 --- a/drivers/tee/tee_shm.c +++ 
b/drivers/tee/tee_shm.c @@ -321,6 +321,14 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags, if (unlikely(len <= 0)) { ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM); goto err_free_shm_pages; + } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) { + /* + * If we only got a few pages, update to release the + * correct amount below. + */ + shm->num_pages = len / PAGE_SIZE; + ret = ERR_PTR(-ENOMEM); + goto err_put_shm_pages; } /* diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig index 2c7f3f9a26eb..a6bb01082ec6 100644 --- a/drivers/thermal/qcom/Kconfig +++ b/drivers/thermal/qcom/Kconfig @@ -34,7 +34,8 @@ config QCOM_SPMI_TEMP_ALARM config QCOM_LMH tristate "Qualcomm Limits Management Hardware" - depends on ARCH_QCOM && QCOM_SCM + depends on ARCH_QCOM || COMPILE_TEST + select QCOM_SCM help This enables initialization of Qualcomm limits management hardware(LMh). LMh allows for hardware-enforced mitigation for cpus based on diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index d2d49264cf83..7c299184c59b 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -5,6 +5,8 @@ */ #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqdesc.h> #include <linux/irqdomain.h> #include <linux/err.h> #include <linux/platform_device.h> diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 252849910588..c917fc20b469 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -461,6 +461,7 @@ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg); static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr); static void gsmld_write_trigger(struct gsm_mux *gsm); static void gsmld_write_task(struct work_struct *work); +static int gsm_modem_send_initial_msc(struct gsm_dlci *dlci); /** * gsm_fcs_add - update FCS @@ -2174,7 +2175,7 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) pr_debug("DLCI %d goes open.\n", dlci->addr); /* Send current modem state */ if (dlci->addr) { - gsm_modem_update(dlci, 0); + gsm_modem_send_initial_msc(dlci); } else { /* Start keep-alive control */ gsm->ka_num = 0; @@ -4157,6 +4158,28 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk) } /** + * gsm_modem_send_initial_msc - Send initial modem status message + * + * @dlci channel + * + * Send an initial MSC message after DLCI open to set the initial + * modem status lines. This is only done for basic mode. + * Does not wait for a response as we cannot block the input queue + * processing. 
+ */ +static int gsm_modem_send_initial_msc(struct gsm_dlci *dlci) +{ + u8 modembits[2]; + + if (dlci->adaption != 1 || dlci->gsm->encoding != GSM_BASIC_OPT) + return 0; + + modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */ + modembits[1] = (gsm_encode_modem(dlci) << 1) | EA; + return gsm_control_command(dlci->gsm, CMD_MSC, (const u8 *)&modembits, 2); +} + +/** * gsm_modem_update - send modem status line state * @dlci: channel * @brk: break signal diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 35369a2f77b2..2f8e3ea4fe12 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1641,6 +1641,8 @@ static int max310x_i2c_probe(struct i2c_client *client) port_client = devm_i2c_new_dummy_device(&client->dev, client->adapter, port_addr); + if (IS_ERR(port_client)) + return PTR_ERR(port_client); regcfg_i2c.name = max310x_regmap_name(i); regmaps[i] = devm_regmap_init_i2c(port_client, ®cfg_i2c); diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index a6551a795f74..0b414d1168dd 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -98,7 +98,6 @@ static void hv_uio_channel_cb(void *context) struct hv_device *hv_dev = chan->device_obj; struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); - chan->inbound.ring_buffer->interrupt_mask = 1; virt_mb(); uio_event_notify(&pdata->info); @@ -163,8 +162,6 @@ hv_uio_new_channel(struct vmbus_channel *new_sc) return; } - /* Disable interrupts on sub channel */ - new_sc->inbound.ring_buffer->interrupt_mask = 1; set_channel_read_mode(new_sc, HV_CALL_ISR); ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap); if (ret) { @@ -207,9 +204,7 @@ hv_uio_open(struct uio_info *info, struct inode *inode) ret = vmbus_connect_ring(dev->channel, hv_uio_channel_cb, dev->channel); - if (ret == 0) - dev->channel->inbound.ring_buffer->interrupt_mask = 1; - else + if (ret) atomic_dec(&pdata->refcnt); return ret; diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index 36781ea60f6a..4a9eff7fdb12 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -91,7 +91,7 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL); if (!cdnsp) { ret = -ENOMEM; - goto disable_pci; + goto put_pci; } } @@ -174,9 +174,6 @@ free_cdnsp: if (!pci_is_enabled(func)) kfree(cdnsp); -disable_pci: - pci_disable_device(pdev); - put_pci: pci_dev_put(func); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 1b4d0056f1d0..82282373c786 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1750,6 +1750,8 @@ static int configfs_composite_bind(struct usb_gadget *gadget, cdev->use_os_string = true; cdev->b_vendor_code = gi->b_vendor_code; memcpy(cdev->qw_sign, gi->qw_sign, OS_STRING_QW_SIGN_LEN); + } else { + cdev->use_os_string = false; } if (gadget_is_otg(gadget) && !otg_desc[0]) { diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index dcf31a592f5d..4b5f03f683f7 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1916,7 +1916,7 @@ error: if (hcd) { kfree(max3421_hcd->tx); kfree(max3421_hcd->rx); - if (max3421_hcd->spi_thread) + if (!IS_ERR_OR_NULL(max3421_hcd->spi_thread)) kthread_stop(max3421_hcd->spi_thread); usb_put_hcd(hcd); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1002fa51a25a..f377725a1212 100644 --- a/drivers/usb/host/xhci-ring.c +++ 
b/drivers/usb/host/xhci-ring.c @@ -1199,19 +1199,16 @@ reset_done: * Stopped state, but it will soon change to Running. * * Assume this bug on unexpected Stop Endpoint failures. - * Keep retrying until the EP starts and stops again. + * Keep retrying until the EP starts and stops again, on + * chips where this is known to help. Wait for 100ms. */ + if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) + break; fallthrough; case EP_STATE_RUNNING: /* Race, HW handled stop ep cmd before ep was running */ xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n", GET_EP_CTX_STATE(ep_ctx)); - /* - * Don't retry forever if we guessed wrong or a defective HC never starts - * the EP or says 'Running' but fails the command. We must give back TDs. - */ - if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) - break; command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) { diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 6497c4e81e95..9bf8fc6247ba 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -147,6 +147,7 @@ config USB_APPLEDISPLAY config USB_QCOM_EUD tristate "QCOM Embedded USB Debugger(EUD) Driver" depends on ARCH_QCOM || COMPILE_TEST + select QCOM_SCM select USB_ROLE_SWITCH help This module enables support for Qualcomm Technologies, Inc. diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c index 19906301a4eb..012e3b9d9bcc 100644 --- a/drivers/usb/misc/qcom_eud.c +++ b/drivers/usb/misc/qcom_eud.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/usb/role.h> +#include <linux/firmware/qcom/qcom_scm.h> #define EUD_REG_INT1_EN_MASK 0x0024 #define EUD_REG_INT_STATUS_1 0x0044 @@ -34,7 +35,7 @@ struct eud_chip { struct device *dev; struct usb_role_switch *role_sw; void __iomem *base; - void __iomem *mode_mgr; + phys_addr_t mode_mgr; unsigned int int_status; int irq; bool enabled; @@ -43,18 +44,29 @@ struct eud_chip { static int enable_eud(struct eud_chip *priv) { + int ret; + + ret = qcom_scm_io_writel(priv->mode_mgr + EUD_REG_EUD_EN2, 1); + if (ret) + return ret; + writel(EUD_ENABLE, priv->base + EUD_REG_CSR_EUD_EN); writel(EUD_INT_VBUS | EUD_INT_SAFE_MODE, priv->base + EUD_REG_INT1_EN_MASK); - writel(1, priv->mode_mgr + EUD_REG_EUD_EN2); return usb_role_switch_set_role(priv->role_sw, USB_ROLE_DEVICE); } -static void disable_eud(struct eud_chip *priv) +static int disable_eud(struct eud_chip *priv) { + int ret; + + ret = qcom_scm_io_writel(priv->mode_mgr + EUD_REG_EUD_EN2, 0); + if (ret) + return ret; + writel(0, priv->base + EUD_REG_CSR_EUD_EN); - writel(0, priv->mode_mgr + EUD_REG_EUD_EN2); + return 0; } static ssize_t enable_show(struct device *dev, @@ -82,11 +94,12 @@ static ssize_t enable_store(struct device *dev, chip->enabled = enable; else disable_eud(chip); + } else { - disable_eud(chip); + ret = disable_eud(chip); } - return count; + return ret < 0 ? 
ret : count; } static DEVICE_ATTR_RW(enable); @@ -178,6 +191,7 @@ static void eud_role_switch_release(void *data) static int eud_probe(struct platform_device *pdev) { struct eud_chip *chip; + struct resource *res; int ret; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); @@ -200,9 +214,10 @@ static int eud_probe(struct platform_device *pdev) if (IS_ERR(chip->base)) return PTR_ERR(chip->base); - chip->mode_mgr = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(chip->mode_mgr)) - return PTR_ERR(chip->mode_mgr); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) + return -ENODEV; + chip->mode_mgr = res->start; chip->irq = platform_get_irq(pdev, 0); if (chip->irq < 0) diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index da09cff55abc..0e732cd53b62 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -328,9 +328,8 @@ static int twl6030_set_vbus(struct phy_companion *comparator, bool enabled) static int twl6030_usb_probe(struct platform_device *pdev) { - u32 ret; struct twl6030_usb *twl; - int status, err; + int status, err, ret; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 7ee721a877c1..1bdf6f5538f4 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -545,24 +545,23 @@ static irqreturn_t cd321x_interrupt(int irq, void *data) if (!event) goto err_unlock; + tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event); + if (!tps6598x_read_status(tps, &status)) - goto err_clear_ints; + goto err_unlock; if (event & APPLE_CD_REG_INT_POWER_STATUS_UPDATE) if (!tps6598x_read_power_status(tps)) - goto err_clear_ints; + goto err_unlock; if (event & APPLE_CD_REG_INT_DATA_STATUS_UPDATE) if (!tps6598x_read_data_status(tps)) - goto err_clear_ints; + goto err_unlock; /* Handle plug insert or removal */ if (event & APPLE_CD_REG_INT_PLUG_EVENT) tps6598x_handle_plug_event(tps, status); -err_clear_ints: - tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event); - err_unlock: mutex_unlock(&tps->lock); @@ -668,25 +667,24 @@ static irqreturn_t tps6598x_interrupt(int irq, void *data) if (!(event1[0] | event1[1] | event2[0] | event2[1])) goto err_unlock; + tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len); + tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len); + if (!tps6598x_read_status(tps, &status)) - goto err_clear_ints; + goto err_unlock; if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE) if (!tps6598x_read_power_status(tps)) - goto err_clear_ints; + goto err_unlock; if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE) if (!tps6598x_read_data_status(tps)) - goto err_clear_ints; + goto err_unlock; /* Handle plug insert or removal */ if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT) tps6598x_handle_plug_event(tps, status); -err_clear_ints: - tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len); - tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len); - err_unlock: mutex_unlock(&tps->lock); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 8dac1edc74d4..a793e30d46b7 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -764,6 +764,17 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag ctrlreq->wValue, vdev->rhport); vdev->udev = usb_get_dev(urb->dev); + /* + * NOTE: A similar operation has been done via + * USB_REQ_GET_DESCRIPTOR 
handler below, which is + * supposed to always precede USB_REQ_SET_ADDRESS. + * + * It's not entirely clear if operating on a different + * usb_device instance here is a real possibility, + * otherwise this call and vdev->udev assignment above + * should be dropped. + */ + dev_pm_syscore_device(&vdev->udev->dev, true); usb_put_dev(old); spin_lock(&vdev->ud.lock); @@ -784,6 +795,17 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n"); vdev->udev = usb_get_dev(urb->dev); + /* + * Set syscore PM flag for the virtually attached + * devices to ensure they will not enter suspend on + * the client side. + * + * Note this doesn't have any impact on the physical + * devices attached to the host system on the server + * side, hence there is no need to undo the operation + * on disconnect. + */ + dev_pm_syscore_device(&vdev->udev->dev, true); usb_put_dev(old); goto out; diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c index c51f5e4c3dd6..481992142f79 100644 --- a/drivers/vfio/pci/pds/dirty.c +++ b/drivers/vfio/pci/pds/dirty.c @@ -82,7 +82,7 @@ static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region, host_ack_bmp = vzalloc(bytes); if (!host_ack_bmp) { - bitmap_free(host_seq_bmp); + vfree(host_seq_bmp); return -ENOMEM; } diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 73e153f9b449..781731eb95cf 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -1191,6 +1191,7 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, struct iov_iter iter; u64 translated; int ret; + size_t size; ret = iotlb_translate(vrh, (u64)(uintptr_t)src, len - total_translated, &translated, @@ -1208,9 +1209,9 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, translated); } - ret = copy_from_iter(dst, translated, &iter); - if (ret < 0) - return ret; + size = copy_from_iter(dst, translated, &iter); + if (size != translated) + return -EFAULT; src += translated; dst += translated; @@ -1237,6 +1238,7 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, struct iov_iter iter; u64 translated; int ret; + size_t size; ret = iotlb_translate(vrh, (u64)(uintptr_t)dst, len - total_translated, &translated, @@ -1254,9 +1256,9 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, translated); } - ret = copy_to_iter(src, translated, &iter); - if (ret < 0) - return ret; + size = copy_to_iter(src, translated, &iter); + if (size != translated) + return -EFAULT; src += translated; dst += translated; diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c index 4d1634c492ec..594b60424d1c 100644 --- a/drivers/video/fbdev/core/fb_cmdline.c +++ b/drivers/video/fbdev/core/fb_cmdline.c @@ -40,7 +40,7 @@ int fb_get_options(const char *name, char **option) bool enabled; if (name) - is_of = strncmp(name, "offb", 4); + is_of = !strncmp(name, "offb", 4); enabled = __video_get_options(name, &options, is_of); diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index be95fcddce4c..85bc40aa8b2c 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -93,6 +93,7 @@ struct simplefb_par { static void simplefb_clocks_destroy(struct simplefb_par *par); static void simplefb_regulators_destroy(struct simplefb_par *par); +static void simplefb_detach_genpds(void *res); /* * fb_ops.fb_destroy is called by the last put_fb_info() call at the end @@ 
-105,6 +106,7 @@ static void simplefb_destroy(struct fb_info *info) simplefb_regulators_destroy(info->par); simplefb_clocks_destroy(info->par); + simplefb_detach_genpds(info->par); if (info->screen_base) iounmap(info->screen_base); @@ -454,13 +456,14 @@ static void simplefb_detach_genpds(void *res) if (!IS_ERR_OR_NULL(par->genpds[i])) dev_pm_domain_detach(par->genpds[i], true); } + par->num_genpds = 0; } static int simplefb_attach_genpds(struct simplefb_par *par, struct platform_device *pdev) { struct device *dev = &pdev->dev; - unsigned int i; + unsigned int i, num_genpds; int err; err = of_count_phandle_with_args(dev->of_node, "power-domains", @@ -474,26 +477,35 @@ static int simplefb_attach_genpds(struct simplefb_par *par, return err; } - par->num_genpds = err; + num_genpds = err; /* * Single power-domain devices are handled by the driver core, so * nothing to do here. */ - if (par->num_genpds <= 1) + if (num_genpds <= 1) { + par->num_genpds = num_genpds; return 0; + } - par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds), + par->genpds = devm_kcalloc(dev, num_genpds, sizeof(*par->genpds), GFP_KERNEL); if (!par->genpds) return -ENOMEM; - par->genpd_links = devm_kcalloc(dev, par->num_genpds, + par->genpd_links = devm_kcalloc(dev, num_genpds, sizeof(*par->genpd_links), GFP_KERNEL); if (!par->genpd_links) return -ENOMEM; + /* + * Set par->num_genpds only after genpds and genpd_links are allocated + * to exit early from simplefb_detach_genpds() without full + * initialisation. + */ + par->num_genpds = num_genpds; + for (i = 0; i < par->num_genpds; i++) { par->genpds[i] = dev_pm_domain_attach_by_id(dev, i); if (IS_ERR(par->genpds[i])) { @@ -515,9 +527,10 @@ static int simplefb_attach_genpds(struct simplefb_par *par, dev_warn(dev, "failed to link power-domain %u\n", i); } - return devm_add_action_or_reset(dev, simplefb_detach_genpds, par); + return 0; } #else +static void simplefb_detach_genpds(void *res) { } static int simplefb_attach_genpds(struct simplefb_par *par, struct platform_device *pdev) { @@ -631,18 +644,20 @@ static int simplefb_probe(struct platform_device *pdev) ret = devm_aperture_acquire_for_platform_device(pdev, par->base, par->size); if (ret) { dev_err(&pdev->dev, "Unable to acquire aperture: %d\n", ret); - goto error_regulators; + goto error_genpds; } ret = register_framebuffer(info); if (ret < 0) { dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); - goto error_regulators; + goto error_genpds; } dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); return 0; +error_genpds: + simplefb_detach_genpds(par); error_regulators: simplefb_regulators_destroy(par); error_clocks: diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index 867f9f311379..a4b497ecfa20 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c @@ -100,6 +100,8 @@ static int mpc8xxx_wdt_start(struct watchdog_device *w) ddata->swtc = tmp >> 16; set_bit(WDOG_HW_RUNNING, &ddata->wdd.status); + mpc8xxx_wdt_keepalive(ddata); + return 0; } diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 81effbd53dc5..ecd3ddda6ccb 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -1320,14 +1320,17 @@ int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev, } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi); -static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) +static int find_virq(unsigned int virq, unsigned int cpu, 
evtchn_port_t *evtchn, + bool percpu) { struct evtchn_status status; evtchn_port_t port; - int rc = -ENOENT; + bool exists = false; memset(&status, 0, sizeof(status)); for (port = 0; port < xen_evtchn_max_channels(); port++) { + int rc; + status.dom = DOMID_SELF; status.port = port; rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); @@ -1335,12 +1338,16 @@ static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) continue; if (status.status != EVTCHNSTAT_virq) continue; - if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { + if (status.u.virq != virq) + continue; + if (status.vcpu == xen_vcpu_nr(cpu)) { *evtchn = port; - break; + return 0; + } else if (!percpu) { + exists = true; } } - return rc; + return exists ? -EEXIST : -ENOENT; } /** @@ -1387,8 +1394,11 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) evtchn = bind_virq.port; else { if (ret == -EEXIST) - ret = find_virq(virq, cpu, &evtchn); - BUG_ON(ret < 0); + ret = find_virq(virq, cpu, &evtchn, percpu); + if (ret) { + __unbind_from_irq(info, info->irq); + goto out; + } } ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq); @@ -1793,9 +1803,20 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu) * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. */ - if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) + if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) { + int old_cpu = info->cpu; + bind_evtchn_to_cpu(info, tcpu, false); + if (info->type == IRQT_VIRQ) { + int virq = info->u.virq; + int irq = per_cpu(virq_to_irq, old_cpu)[virq]; + + per_cpu(virq_to_irq, old_cpu)[virq] = -1; + per_cpu(virq_to_irq, tcpu)[virq] = irq; + } + } + do_unmask(info, EVT_MASK_REASON_TEMPORARY); return 0; diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index b4b4ebed68da..8f543c816347 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -116,7 +116,7 @@ static void do_suspend(void) err = dpm_suspend_start(PMSG_FREEZE); if (err) { pr_err("%s: dpm_suspend_start %d\n", __func__, err); - goto out_thaw; + goto out_resume_end; } printk(KERN_DEBUG "suspending xenstore...\n"); @@ -156,6 +156,7 @@ out_resume: else xs_suspend_cancel(); +out_resume_end: dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); out_thaw: |
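
The drivers/vhost/vringh.c hunks above rely on the fact that copy_from_iter() and copy_to_iter() return the number of bytes actually copied (a size_t), never a negative errno, so a short copy has to be detected by comparing the return value against the requested length. A minimal userspace sketch of that convention, with a hypothetical copy_some() helper standing in for the iov_iter API:

/*
 * copy_some() models "bytes copied, possibly short, never negative";
 * copy_exact() shows the only reliable failure check: compare against
 * the requested length, as the vringh change does.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Pretend only 'avail' bytes are accessible, like a faulting user page. */
static size_t copy_some(void *dst, const void *src, size_t len, size_t avail)
{
        size_t n = len < avail ? len : avail;

        memcpy(dst, src, n);
        return n;               /* bytes copied, possibly short */
}

static int copy_exact(void *dst, const void *src, size_t len, size_t avail)
{
        size_t copied = copy_some(dst, src, len, avail);

        if (copied != len)      /* short copy is a failure */
                return -EFAULT;
        return 0;
}

int main(void)
{
        char src[8] = "payload", dst[8];

        printf("full copy:  %d\n", copy_exact(dst, src, sizeof(src), 8)); /* 0 */
        printf("short copy: %d\n", copy_exact(dst, src, sizeof(src), 4)); /* -EFAULT */
        return 0;
}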
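
The drivers/video/fbdev/core/fb_cmdline.c change corrects a classic strncmp() inversion: strncmp() returns 0 on a match, so the un-negated return value is true exactly when the name does not start with "offb". A small standalone illustration (compile-and-run C, helper name chosen for this sketch only):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool name_is_offb(const char *name)
{
        return !strncmp(name, "offb", 4);       /* true only for an "offb*" name */
}

int main(void)
{
        printf("offb:     %d\n", name_is_offb("offb"));     /* 1 */
        printf("simplefb: %d\n", name_is_offb("simplefb")); /* 0 */
        return 0;
}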
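
The drivers/video/fbdev/simplefb.c hunk defers setting par->num_genpds until the genpds and genpd_links arrays have been allocated, so simplefb_detach_genpds() can be called from any error path without walking memory that was never set up. A minimal sketch of the same publish-the-count-last pattern; the struct, function names, and plain malloc()/free() are illustrative stand-ins, not the devm/power-domain APIs:

#include <stdlib.h>

struct ctx {
        void **domains;         /* attached handles, NULL if unused */
        unsigned int num;       /* valid entries in domains[] */
};

static void ctx_detach(struct ctx *c)
{
        for (unsigned int i = 0; i < c->num; i++)
                free(c->domains[i]);    /* free(NULL) is a no-op */
        free(c->domains);
        c->domains = NULL;
        c->num = 0;             /* safe to call ctx_detach() again */
}

static int ctx_attach(struct ctx *c, unsigned int count)
{
        c->domains = calloc(count, sizeof(*c->domains));
        if (!c->domains)
                return -1;      /* c->num still 0: ctx_detach() does nothing */

        c->num = count;         /* publish the count only now */

        for (unsigned int i = 0; i < count; i++) {
                c->domains[i] = malloc(16);
                if (!c->domains[i]) {
                        ctx_detach(c);  /* frees only what exists */
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct ctx c = { 0 };

        if (ctx_attach(&c, 4) == 0)
                ctx_detach(&c);
        return 0;
}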