Merge "genirq: Add IRQ_AFFINITY_MANAGED flag"
This commit is contained in:
commit
ff528055ec
6 changed files with 47 additions and 6 deletions
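In short: IRQ_AFFINITY_MANAGED (and its irq_data counterpart IRQD_AFFINITY_MANAGED) marks an interrupt whose affinity is auto-managed by the kernel. The /proc write path below now rejects user-space affinity changes for such interrupts, while kernel-internal affinity handling is unaffected. A minimal sketch of how core code could apply the flag, using only interfaces this diff touches; mark_irq_managed() is a hypothetical helper, not part of the commit:

	#include <linux/irq.h>

	/* Hypothetical helper (not in this commit): mark an interrupt as
	 * kernel-managed.  IRQ_AFFINITY_MANAGED is now part of
	 * IRQF_MODIFY_MASK, so irq_modify_status() is allowed to set it,
	 * and the chip.c hunk below mirrors the settings bit into the
	 * irq_data state as IRQD_AFFINITY_MANAGED. */
	static void mark_irq_managed(unsigned int irq)
	{
		irq_modify_status(irq, 0, IRQ_AFFINITY_MANAGED);
	}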
diff --git a/include/linux/irq.h b/include/linux/irq.h
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -73,6 +73,7 @@ enum irqchip_irq_state;
  *				  it from the spurious interrupt detection
  *				  mechanism and from core side polling.
  * IRQ_DISABLE_UNLAZY		- Disable lazy irq disable
+ * IRQ_AFFINITY_MANAGED		- Affinity is auto-managed by the kernel
  */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@@ -99,13 +100,14 @@ enum {
 	IRQ_PER_CPU_DEVID	= (1 << 17),
 	IRQ_IS_POLLED		= (1 << 18),
 	IRQ_DISABLE_UNLAZY	= (1 << 19),
+	IRQ_AFFINITY_MANAGED	= (1 << 21),
 };
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
 	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_AFFINITY_MANAGED)
 
 #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
 
@@ -191,6 +193,7 @@ struct irq_data {
  * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
  * IRQD_WAKEUP_ARMED		- Wakeup mode armed
  * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
+ * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -206,6 +209,7 @@ enum {
 	IRQD_IRQ_INPROGRESS		= (1 << 18),
 	IRQD_WAKEUP_ARMED		= (1 << 19),
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
+	IRQD_AFFINITY_MANAGED		= (1 << 21),
 };
 
 #define __irqd_to_state(d) ((d)->common->state_use_accessors)
 
@@ -299,6 +303,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
 	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
+static inline bool irqd_affinity_is_managed(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+}
+
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
 	return d->hwirq;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -836,7 +836,8 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 	irq_settings_clr_and_set(desc, clr, set);
 
 	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
-		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT |
+		   IRQD_AFFINITY_MANAGED);
 	if (irq_settings_has_no_balance_set(desc))
 		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 	if (irq_settings_is_per_cpu(desc))
@@ -845,6 +846,8 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
 	if (irq_settings_is_level(desc))
 		irqd_set(&desc->irq_data, IRQD_LEVEL);
+	if (irq_settings_has_affinity_managed_set(desc))
+		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
 
 	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -105,6 +105,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 					    struct irqaction *action) { }
 #endif
 
+extern bool irq_can_set_affinity_usr(unsigned int irq);
+
 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -115,12 +115,12 @@ EXPORT_SYMBOL(synchronize_irq);
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
-static int __irq_can_set_affinity(struct irq_desc *desc)
+static bool __irq_can_set_affinity(struct irq_desc *desc)
 {
 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 /**
@@ -133,6 +133,21 @@ int irq_can_set_affinity(unsigned int irq)
 	return __irq_can_set_affinity(irq_to_desc(irq));
 }
 
+/**
+ * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
+ * @irq:	Interrupt to check
+ *
+ * Like irq_can_set_affinity() above, but additionally checks for the
+ * AFFINITY_MANAGED flag.
+ */
+bool irq_can_set_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __irq_can_set_affinity(desc) &&
+		!irqd_affinity_is_managed(&desc->irq_data);
+}
+
 /**
  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
  *	@desc:		irq descriptor which has affinity changed
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -96,7 +96,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
 	cpumask_var_t new_value;
 	int err;
 
-	if (!irq_can_set_affinity(irq) || no_irq_affinity)
+	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
 		return -EIO;
 
 	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
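Note: write_irq_affinity() backs both /proc/irq/<n>/smp_affinity and /proc/irq/<n>/smp_affinity_list, so once an interrupt is marked managed, writes to either file fail with -EIO. Kernel-internal callers of irq_set_affinity() keep working, since only this user-space gate gained the extra check.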
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -17,6 +17,7 @@ enum {
 	_IRQ_IS_POLLED		= IRQ_IS_POLLED,
 	_IRQ_DISABLE_UNLAZY	= IRQ_DISABLE_UNLAZY,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
+	_IRQ_AFFINITY_MANAGED	= IRQ_AFFINITY_MANAGED,
 };
 
 #define IRQ_PER_CPU		GOT_YOU_MORON
@@ -32,6 +33,7 @@ enum {
 #define IRQ_DISABLE_UNLAZY	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
+#define IRQ_AFFINITY_MANAGED	GOT_YOU_MORON
 
 static inline void
 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
@@ -65,6 +67,16 @@ static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
 	return desc->status_use_accessors & _IRQ_NO_BALANCING;
 }
 
+static inline void irq_settings_set_affinity_managed(struct irq_desc *desc)
+{
+	desc->status_use_accessors |= _IRQ_AFFINITY_MANAGED;
+}
+
+static inline bool irq_settings_has_affinity_managed_set(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_AFFINITY_MANAGED;
+}
+
 static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
 {
 	return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
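The settings.h helpers follow the pattern of every other flag in that file: core code touches the shadowed _IRQ_AFFINITY_MANAGED bit only through accessors, and the GOT_YOU_MORON define poisons direct use of the public name inside kernel/irq/. A hedged sketch of how the two bits stay in sync, mirroring what the chip.c hunk does in irq_modify_status(); irq_mark_affinity_managed() is hypothetical and not part of this commit:

	/* Hypothetical core-internal helper: set the persistent settings bit
	 * and the runtime irq_data state bit together, as irq_modify_status()
	 * does for flags passed in via "set".  irqd_set() is the existing
	 * internals.h accessor also used by the chip.c hunk above. */
	static void irq_mark_affinity_managed(struct irq_desc *desc)
	{
		irq_settings_set_affinity_managed(desc);
		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
	}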