                /* Tally up the load of all CPUs in the group */
                avg_load = 0;
 
-               for_each_cpu_mask_nr(i, group->cpumask) {
+               for_each_cpu(i, &group->cpumask) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = source_load(i, load_idx);
        /* Traverse only the allowed CPUs */
        cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-       for_each_cpu_mask_nr(i, *tmp) {
+       for_each_cpu(i, tmp) {
                load = weighted_cpuload(i);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
                max_cpu_load = 0;
                min_cpu_load = ~0UL;
 
-               for_each_cpu_mask_nr(i, group->cpumask) {
+               for_each_cpu(i, &group->cpumask) {
                        struct rq *rq;
 
                        if (!cpu_isset(i, *cpus))
        unsigned long max_load = 0;
        int i;
 
-       for_each_cpu_mask_nr(i, group->cpumask) {
+       for_each_cpu(i, &group->cpumask) {
                unsigned long wl;
 
                if (!cpu_isset(i, *cpus))
                int balance_cpu;
 
                cpu_clear(this_cpu, cpus);
-               for_each_cpu_mask_nr(balance_cpu, cpus) {
+               for_each_cpu(balance_cpu, &cpus) {
                        /*
                         * If this cpu gets work to do, stop the load balancing
                         * work being done for other cpus. Next load
 
        cpus_clear(*covered);
 
-       for_each_cpu_mask_nr(i, *span) {
+       for_each_cpu(i, span) {
                struct sched_group *sg;
                int group = group_fn(i, cpu_map, &sg, tmpmask);
                int j;
                cpus_clear(sg->cpumask);
                sg->__cpu_power = 0;
 
-               for_each_cpu_mask_nr(j, *span) {
+               for_each_cpu(j, span) {
                        if (group_fn(j, cpu_map, NULL, tmpmask) != group)
                                continue;
 
        if (!sg)
                return;
        do {
-               for_each_cpu_mask_nr(j, sg->cpumask) {
+               for_each_cpu(j, &sg->cpumask) {
                        struct sched_domain *sd;
 
                        sd = &per_cpu(phys_domains, j);
 {
        int cpu, i;
 
-       for_each_cpu_mask_nr(cpu, *cpu_map) {
+       for_each_cpu(cpu, cpu_map) {
                struct sched_group **sched_group_nodes
                        = sched_group_nodes_bycpu[cpu];
 
        /*
         * Set up domains for cpus specified by the cpu_map.
         */
-       for_each_cpu_mask_nr(i, *cpu_map) {
+       for_each_cpu(i, cpu_map) {
                struct sched_domain *sd = NULL, *p;
                SCHED_CPUMASK_VAR(nodemask, allmasks);
 
 
 #ifdef CONFIG_SCHED_SMT
        /* Set up CPU (sibling) groups */
-       for_each_cpu_mask_nr(i, *cpu_map) {
+       for_each_cpu(i, cpu_map) {
                SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
                SCHED_CPUMASK_VAR(send_covered, allmasks);
 
 
 #ifdef CONFIG_SCHED_MC
        /* Set up multi-core groups */
-       for_each_cpu_mask_nr(i, *cpu_map) {
+       for_each_cpu(i, cpu_map) {
                SCHED_CPUMASK_VAR(this_core_map, allmasks);
                SCHED_CPUMASK_VAR(send_covered, allmasks);
 
                        goto error;
                }
                sched_group_nodes[i] = sg;
-               for_each_cpu_mask_nr(j, *nodemask) {
+               for_each_cpu(j, nodemask) {
                        struct sched_domain *sd;
 
                        sd = &per_cpu(node_domains, j);
 
        /* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-       for_each_cpu_mask_nr(i, *cpu_map) {
+       for_each_cpu(i, cpu_map) {
                struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
                init_sched_groups_power(i, sd);
        }
 #endif
 #ifdef CONFIG_SCHED_MC
-       for_each_cpu_mask_nr(i, *cpu_map) {
+       for_each_cpu(i, cpu_map) {
                struct sched_domain *sd = &per_cpu(core_domains, i);
 
                init_sched_groups_power(i, sd);
        }
 #endif
 
-       for_each_cpu_mask_nr(i, *cpu_map) {
+       for_each_cpu(i, cpu_map) {
                struct sched_domain *sd = &per_cpu(phys_domains, i);
 
                init_sched_groups_power(i, sd);
 #endif
 
        /* Attach the domains */
-       for_each_cpu_mask_nr(i, *cpu_map) {
+       for_each_cpu(i, cpu_map) {
                struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                sd = &per_cpu(cpu_domains, i);
        cpumask_t tmpmask;
        int i;
 
-       for_each_cpu_mask_nr(i, *cpu_map)
+       for_each_cpu(i, cpu_map)
                cpu_attach_domain(NULL, &def_root_domain, i);
        synchronize_sched();
        arch_destroy_sched_domains(cpu_map, &tmpmask);
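
The conversions above all follow one pattern: for_each_cpu_mask_nr() took the cpumask object itself, while for_each_cpu() takes a pointer to it, so call sites either gain an '&' (group->cpumask, the on-stack cpus) or drop a '*' (tmp, span, cpu_map). The toy userspace sketch below is not kernel code; the toy_* names are hypothetical stand-ins for the real macros, meant only to illustrate the by-name versus by-pointer argument convention:

/*
 * Toy sketch of the two iterator conventions. The real kernel macros walk
 * an arbitrary-width bitmap with cpumask_next(); this sketch uses a single
 * unsigned long for brevity.
 */
#include <stdio.h>

#define NR_CPUS 8
typedef struct { unsigned long bits; } toy_cpumask_t;

static int toy_next_cpu(int n, const toy_cpumask_t *mask)
{
	for (n++; n < NR_CPUS; n++)
		if (mask->bits & (1UL << n))
			return n;
	return NR_CPUS;
}

/* old style: mask passed by name, the macro takes its address internally */
#define toy_for_each_cpu_mask(cpu, mask) \
	for ((cpu) = toy_next_cpu(-1, &(mask)); (cpu) < NR_CPUS; \
	     (cpu) = toy_next_cpu((cpu), &(mask)))

/* new style: the caller passes a pointer, as for_each_cpu() expects */
#define toy_for_each_cpu(cpu, maskp) \
	for ((cpu) = toy_next_cpu(-1, (maskp)); (cpu) < NR_CPUS; \
	     (cpu) = toy_next_cpu((cpu), (maskp)))

int main(void)
{
	toy_cpumask_t online = { .bits = 0x2d };	/* cpus 0, 2, 3, 5 */
	int cpu;

	toy_for_each_cpu_mask(cpu, online)		/* object by name */
		printf("old-style iterator: cpu %d\n", cpu);

	toy_for_each_cpu(cpu, &online)			/* explicit pointer */
		printf("new-style iterator: cpu %d\n", cpu);

	return 0;
}

With that convention in mind, each hunk is mechanical: where the old call passed group->cpumask or a local cpumask_t, the new call takes its address; where the old call dereferenced a pointer (*tmp, *span, *cpu_map), the new call passes the pointer directly.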