static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
unsigned long flags;
- WARN_ON(cpu < 0 || cpu >= num_possible_cpus());
+ WARN_ON(cpu < 0);
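
/*
 * Not part of the patch: the upper bound was dropped because
 * cpu_possible_mask may be sparse, so a valid cpu id can equal or
 * exceed num_possible_cpus(). If an upper-bound check is still wanted,
 * mask membership is the right predicate; a sketch using the standard
 * cpu_possible() helper:
 *
 *	WARN_ON(cpu < 0 || !cpu_possible(cpu));
 */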
/* Workqueues are sometimes created in atomic context */
cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
pr_warning("trace_workqueue: not enough memory\n");
return;
}
- tracing_record_cmdline(wq_thread);
-
INIT_LIST_HEAD(&cws->list);
cws->cpu = cpu;

static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
- for (++cpu ; cpu < num_possible_cpus(); cpu++) {
- ret = workqueue_stat_start_cpu(cpu);
- if (ret)
- return ret;
- }
- return NULL;
+ do {
+ cpu = cpumask_next(cpu, cpu_possible_mask);
+ if (cpu >= nr_cpu_ids)
+ return NULL;
+ } while (!(ret = workqueue_stat_start_cpu(cpu)));
+ return ret;
}
spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
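
/*
 * Standalone illustration (not from the patch) of why
 * "for (++cpu; cpu < num_possible_cpus(); cpu++)" is wrong on a
 * sparse mask: with possible CPUs {0, 2, 3}, num_possible_cpus() is 3,
 * so the old bound never visits cpu 3 and wrongly visits cpu 1.
 * mask_next() models cpumask_next()/nr_cpu_ids in plain userspace C.
 */
#include <stdio.h>

#define NR_CPU_IDS 4
static const int possible[NR_CPU_IDS] = { 1, 0, 1, 1 };	/* cpus 0, 2, 3 */

/* first possible cpu strictly after n, or NR_CPU_IDS when exhausted */
static int mask_next(int n)
{
	for (n++; n < NR_CPU_IDS; n++)
		if (possible[n])
			return n;
	return NR_CPU_IDS;
}

int main(void)
{
	int cpu;

	for (cpu = mask_next(-1); cpu < NR_CPU_IDS; cpu = mask_next(cpu))
		printf("visit cpu %d\n", cpu);	/* prints 0, 2, 3 */
	return 0;
}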

static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
unsigned long flags;
int cpu = cws->cpu;
+ struct task_struct *tsk = find_task_by_vpid(cws->pid);
seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
atomic_read(&cws->inserted),
cws->executed,
- trace_find_cmdline(cws->pid));
+ tsk ? tsk->comm : "<...>");
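
/*
 * Caveat: find_task_by_vpid() as added above must run under
 * rcu_read_lock() (or tasklist_lock) and returns an unpinned task, so
 * 'tsk' may vanish before seq_printf() uses it. A reference-safe
 * alternative, sketched here and not part of the patch, using only
 * standard pid helpers:
 */
struct pid *pid = find_get_pid(cws->pid);
struct task_struct *tsk = pid ? get_pid_task(pid, PIDTYPE_PID) : NULL;

seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
	   atomic_read(&cws->inserted), cws->executed,
	   tsk ? tsk->comm : "<...>");
if (tsk)
	put_task_struct(tsk);
put_pid(pid);	/* put_pid(NULL) is a no-op */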
spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
if (&cws->list == workqueue_cpu_stat(cpu)->list.next)