diff options
Diffstat (limited to 'kernel/irq/manage.c')
| -rw-r--r-- | kernel/irq/manage.c | 70 | 
1 file changed, 70 insertions(+), 0 deletions(-)
| diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c460e0496006..c826ba4141fe 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -371,6 +371,76 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,  	return ret;  } +/** + * irq_update_affinity_desc - Update affinity management for an interrupt + * @irq:	The interrupt number to update + * @affinity:	Pointer to the affinity descriptor + * + * This interface can be used to configure the affinity management of + * interrupts which have been allocated already. + * + * There are certain limitations on when it may be used - attempts to use it + * for when the kernel is configured for generic IRQ reservation mode (in + * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with + * managed/non-managed interrupt accounting. In addition, attempts to use it on + * an interrupt which is already started or which has already been configured + * as managed will also fail, as these mean invalid init state or double init. + */ +int irq_update_affinity_desc(unsigned int irq, +			     struct irq_affinity_desc *affinity) +{ +	struct irq_desc *desc; +	unsigned long flags; +	bool activated; +	int ret = 0; + +	/* +	 * Supporting this with the reservation scheme used by x86 needs +	 * some more thought. Fail it for now. +	 */ +	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) +		return -EOPNOTSUPP; + +	desc = irq_get_desc_buslock(irq, &flags, 0); +	if (!desc) +		return -EINVAL; + +	/* Requires the interrupt to be shut down */ +	if (irqd_is_started(&desc->irq_data)) { +		ret = -EBUSY; +		goto out_unlock; +	} + +	/* Interrupts which are already managed cannot be modified */ +	if (irqd_affinity_is_managed(&desc->irq_data)) { +		ret = -EBUSY; +		goto out_unlock; +	} + +	/* +	 * Deactivate the interrupt. That's required to undo +	 * anything an earlier activation has established. 
+	 */ +	activated = irqd_is_activated(&desc->irq_data); +	if (activated) +		irq_domain_deactivate_irq(&desc->irq_data); + +	if (affinity->is_managed) { +		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); +		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); +	} + +	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); + +	/* Restore the activation state */ +	if (activated) +		irq_domain_activate_irq(&desc->irq_data, false); + +out_unlock: +	irq_put_desc_busunlock(desc, flags); +	return ret; +} +  int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)  {  	struct irq_desc *desc = irq_to_desc(irq); | 
