diff options
| author | Jiri Kosina <jkosina@suse.cz> | 2018-08-20 18:05:17 +0200 | 
|---|---|---|
| committer | Jiri Kosina <jkosina@suse.cz> | 2018-08-20 18:05:17 +0200 | 
| commit | 415d2b3392d7a80903e0f97f051201aa02bf20e9 (patch) | |
| tree | 47492d2386a0e7f00ef645313cb44ae4960b7e7e /kernel/irq/migration.c | |
| parent | 4f65245f2d178b9cba48350620d76faa4a098841 (diff) | |
| parent | b8e759b8f6dab1c473c30ac12709095d0b81078e (diff) | |
Merge branch 'for-4.19/cougar' into for-linus
New device support for hid-cougar
Diffstat (limited to 'kernel/irq/migration.c')
| -rw-r--r-- | kernel/irq/migration.c | 31 | 
1 file changed, 20 insertions, 11 deletions
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 86ae0eb80b53..def48589ea48 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
 void irq_move_masked_irq(struct irq_data *idata)
 {
 	struct irq_desc *desc = irq_data_to_desc(idata);
-	struct irq_chip *chip = desc->irq_data.chip;
+	struct irq_data *data = &desc->irq_data;
+	struct irq_chip *chip = data->chip;
 
-	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
+	if (likely(!irqd_is_setaffinity_pending(data)))
 		return;
 
-	irqd_clr_move_pending(&desc->irq_data);
+	irqd_clr_move_pending(data);
 
 	/*
 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
 	 */
-	if (irqd_is_per_cpu(&desc->irq_data)) {
+	if (irqd_is_per_cpu(data)) {
 		WARN_ON(1);
 		return;
 	}
@@ -73,13 +74,24 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
-		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
-
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
+		int ret;
+
+		ret = irq_do_set_affinity(data, desc->pending_mask, false);
+		/*
+		 * If the there is a cleanup pending in the underlying
+		 * vector management, reschedule the move for the next
+		 * interrupt. Leave desc->pending_mask intact.
+		 */
+		if (ret == -EBUSY) {
+			irqd_set_move_pending(data);
+			return;
+		}
+	}
 	cpumask_clear(desc->pending_mask);
 }
 
-void irq_move_irq(struct irq_data *idata)
+void __irq_move_irq(struct irq_data *idata)
 {
 	bool masked;
@@ -90,9 +102,6 @@ void irq_move_irq(struct irq_data *idata)
 	 */
 	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
 
-	if (likely(!irqd_is_setaffinity_pending(idata)))
-		return;
-
 	if (unlikely(irqd_irq_disabled(idata)))
 		return;
