@@ -158,20 +158,22 @@
 		return -EINVAL;
 
 	raw_spin_lock(&irq_controller_lock);
 	reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
 	mask = 0xffff << shift;
 	bit = hip04_cpu_map[cpu] << shift;
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
 	raw_spin_unlock(&irq_controller_lock);
 
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
 	return IRQ_SET_MASK_OK;
 }
 #endif
 
 static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
 {
 	u32 irqstat, irqnr;
 	void __iomem *cpu_base = hip04_data.cpu_base;
 
 	do {
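
The added call follows the usual pattern for controllers that can only deliver an interrupt to a single CPU at a time: after reprogramming the routing register, the set_affinity callback reports which CPU it actually picked via irq_data_update_effective_affinity(). A minimal sketch of that pattern in a standalone callback is below; my_irq_set_affinity() and my_chip_route() are hypothetical stand-ins for the driver-specific parts (the GIC_DIST_TARGET register update in the hunk above), while the cpumask helpers and the return value are the real kernel API.

/*
 * Sketch only: a .irq_set_affinity callback for a single-target
 * controller. my_chip_route() is hypothetical; it represents the
 * hardware-specific routing-register write.
 */
static int my_irq_set_affinity(struct irq_data *d,
			       const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;

	/* Pick one online CPU out of the requested mask. */
	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	my_chip_route(d, cpu);	/* hypothetical: program the target CPU */

	/*
	 * Record the single CPU the interrupt actually ended up on, so
	 * the effective affinity seen by the core reflects reality.
	 */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}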
@@ -305,20 +307,21 @@
 {
 	if (hw < 32) {
 		irq_set_percpu_devid(irq);
 		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_percpu_devid_irq);
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	} else {
 		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_fasteoi_irq);
 		irq_set_probe(irq);
+		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
 	}
 	irq_set_chip_data(irq, d->host_data);
 	return 0;
 }
 
 static int hip04_irq_domain_xlate(struct irq_domain *d,
				  struct device_node *controller,
				  const u32 *intspec, unsigned int intsize,
				  unsigned long *out_hwirq,
				  unsigned int *out_type)
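
irqd_set_single_target() sets the IRQD_SINGLE_TARGET flag on the interrupt's irq_data, telling the core that this interrupt is always delivered to exactly one CPU; that is why the map path above marks every non-percpu interrupt (hw >= 32) at mapping time. Code can later query the flag with irqd_is_single_target() and read back the mask recorded by the set_affinity path. A small illustrative sketch, using real kernel accessors but a hypothetical reporting function:

/*
 * Illustrative only: read back the flag set at map time and the
 * effective affinity recorded by the set_affinity path above.
 */
static void my_report_affinity(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);

	if (irqd_is_single_target(d))
		pr_info("irq %u targets CPU %u\n", d->irq, cpumask_first(m));
}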