Blackfin: convert old cpumask API to new one

The old cpu_xxx() APIs are planned to be removed later, so convert to the new cpumask API now.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
This commit is contained in:
KOSAKI Motohiro 2011-04-26 10:57:27 +09:00 committed by Mike Frysinger
parent e887eb61e5
commit fecedc8071
4 changed files with 24 additions and 22 deletions

View file

@ -145,16 +145,16 @@ int check_nmi_wdt_touched(void)
{ {
unsigned int this_cpu = smp_processor_id(); unsigned int this_cpu = smp_processor_id();
unsigned int cpu; unsigned int cpu;
cpumask_t mask;
cpumask_t mask = cpu_online_map; cpumask_copy(&mask, cpu_online_mask);
if (!atomic_read(&nmi_touched[this_cpu])) if (!atomic_read(&nmi_touched[this_cpu]))
return 0; return 0;
atomic_set(&nmi_touched[this_cpu], 0); atomic_set(&nmi_touched[this_cpu], 0);
cpu_clear(this_cpu, mask); cpumask_clear_cpu(this_cpu, &mask);
for_each_cpu_mask(cpu, mask) { for_each_cpu(cpu, &mask) {
invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]), invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
(unsigned long)(&nmi_touched[cpu])); (unsigned long)(&nmi_touched[cpu]));
if (!atomic_read(&nmi_touched[cpu])) if (!atomic_read(&nmi_touched[cpu]))

View file

@ -1324,7 +1324,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos) static void *c_start(struct seq_file *m, loff_t *pos)
{ {
if (*pos == 0) if (*pos == 0)
*pos = first_cpu(cpu_online_map); *pos = cpumask_first(cpu_online_mask);
if (*pos >= num_online_cpus()) if (*pos >= num_online_cpus())
return NULL; return NULL;
@ -1333,7 +1333,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
static void *c_next(struct seq_file *m, void *v, loff_t *pos) static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{ {
*pos = next_cpu(*pos, cpu_online_map); *pos = cpumask_next(*pos, cpu_online_mask);
return c_start(m, pos); return c_start(m, pos);
} }

View file

@ -85,10 +85,11 @@ static void bfin_wakeup_cpu(void)
{ {
unsigned int cpu; unsigned int cpu;
unsigned int this_cpu = smp_processor_id(); unsigned int this_cpu = smp_processor_id();
cpumask_t mask = cpu_online_map; cpumask_t mask;
cpu_clear(this_cpu, mask); cpumask_copy(&mask, cpu_online_mask);
for_each_cpu_mask(cpu, mask) cpumask_clear_cpu(this_cpu, &mask);
for_each_cpu(cpu, &mask)
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0); platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
} }

View file

@ -97,7 +97,7 @@ static void ipi_cpu_stop(unsigned int cpu)
dump_stack(); dump_stack();
spin_unlock(&stop_lock); spin_unlock(&stop_lock);
cpu_clear(cpu, cpu_online_map); set_cpu_online(cpu, false);
local_irq_disable(); local_irq_disable();
@ -147,7 +147,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
*/ */
resync_core_dcache(); resync_core_dcache();
#endif #endif
cpu_clear(cpu, *msg->call_struct.waitmask); cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
} }
} }
@ -223,9 +223,10 @@ static inline void smp_send_message(cpumask_t callmap, unsigned long type,
struct ipi_message_queue *msg_queue; struct ipi_message_queue *msg_queue;
struct ipi_message *msg; struct ipi_message *msg;
unsigned long flags, next_msg; unsigned long flags, next_msg;
cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */ cpumask_t waitmask; /* waitmask is shared by all cpus */
for_each_cpu_mask(cpu, callmap) { cpumask_copy(&waitmask, &callmap);
for_each_cpu(cpu, &callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu); msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags); spin_lock_irqsave(&msg_queue->lock, flags);
if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
@ -247,7 +248,7 @@ static inline void smp_send_message(cpumask_t callmap, unsigned long type,
} }
if (wait) { if (wait) {
while (!cpus_empty(waitmask)) while (!cpumask_empty(&waitmask))
blackfin_dcache_invalidate_range( blackfin_dcache_invalidate_range(
(unsigned long)(&waitmask), (unsigned long)(&waitmask),
(unsigned long)(&waitmask)); (unsigned long)(&waitmask));
@ -266,9 +267,9 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
cpumask_t callmap; cpumask_t callmap;
preempt_disable(); preempt_disable();
callmap = cpu_online_map; cpumask_copy(&callmap, cpu_online_mask);
cpu_clear(smp_processor_id(), callmap); cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpus_empty(callmap)) if (!cpumask_empty(&callmap))
smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
preempt_enable(); preempt_enable();
@ -285,8 +286,8 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
if (cpu_is_offline(cpu)) if (cpu_is_offline(cpu))
return 0; return 0;
cpus_clear(callmap); cpumask_clear(&callmap);
cpu_set(cpu, callmap); cpumask_set_cpu(cpu, &callmap);
smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
@ -309,9 +310,9 @@ void smp_send_stop(void)
cpumask_t callmap; cpumask_t callmap;
preempt_disable(); preempt_disable();
callmap = cpu_online_map; cpumask_copy(&callmap, cpu_online_mask);
cpu_clear(smp_processor_id(), callmap); cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpus_empty(callmap)) if (!cpumask_empty(&callmap))
smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
preempt_enable(); preempt_enable();