struct sched_domain *sd = data;
        int i;
 
-       for_each_cpu_mask(i, sd->span) {
+       for_each_cpu(i, sched_domain_span(sd)) {
                /*
                 * If there are currently no tasks on the cpu pretend there
                 * is one of average load so that when a new task gets to
        if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
                shares = tg->shares;
 
-       for_each_cpu_mask(i, sd->span)
+       for_each_cpu(i, sched_domain_span(sd))
                update_group_shares_cpu(tg, i, shares, rq_weight);
 
        return 0;
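
All of the conversions in this series funnel through two new accessors, sched_domain_span() and sched_group_cpus(). If memory serves they are introduced as thin wrappers over the existing struct fields, so behaviour is unchanged; the point of the indirection is that callers now hold a struct cpumask * and the backing storage can later change shape without another tree-wide sweep. A minimal sketch of that contract (exact bodies may differ):

	/* Sketch only: the accessors as thin wrappers over today's fields.
	 * Callers see a struct cpumask * either way, which is the point.
	 */
	static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
	{
		return &sd->span;	/* still a cpumask_t member for now */
	}

	static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
	{
		return &sg->cpumask;	/* likewise */
	}
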
                int i;
 
                /* Skip over this group if it has no CPUs allowed */
-               if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+               if (!cpumask_intersects(sched_group_cpus(group),
+                                       &p->cpus_allowed))
                        continue;
 
-               local_group = cpu_isset(this_cpu, group->cpumask);
+               local_group = cpumask_test_cpu(this_cpu,
+                                              sched_group_cpus(group));
 
                /* Tally up the load of all CPUs in the group */
                avg_load = 0;
 
-               for_each_cpu(i, &group->cpumask) {
+               for_each_cpu(i, sched_group_cpus(group)) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = source_load(i, load_idx);
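
The predicate conversions above are strictly mechanical: each old value-style op has a pointer-taking cpumask_* twin with identical semantics. A side-by-side sketch (both helpers below are illustration only, not part of the patch):

	#include <linux/cpumask.h>

	/* Old style: ops take cpumask_t values (the macros deref internally). */
	static bool checks_old(int cpu, cpumask_t *a, cpumask_t *b)
	{
		return cpu_isset(cpu, *a) && cpus_intersects(*a, *b);
	}

	/* New style: the same tests, on const struct cpumask pointers. */
	static bool checks_new(int cpu, const struct cpumask *a,
			       const struct cpumask *b)
	{
		return cpumask_test_cpu(cpu, a) && cpumask_intersects(a, b);
	}
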
  * find_idlest_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
-               cpumask_t *tmp)
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
        unsigned long load, min_load = ULONG_MAX;
        int idlest = -1;
        int i;
 
        /* Traverse only the allowed CPUs */
-       cpus_and(*tmp, group->cpumask, p->cpus_allowed);
-
-       for_each_cpu(i, tmp) {
+       for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
                load = weighted_cpuload(i);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
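
This hunk is more than a rename: for_each_cpu_and() walks the intersection lazily via cpumask_next_and(), so both the cpus_and() pass and the caller-supplied tmp mask disappear, which is why the extra parameter could be dropped from the prototype. A sketch of the equivalence, with visit() as a made-up stand-in for the loop body:

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	static void visit(int cpu) { }	/* stand-in body, illustration only */

	/* Old shape: materialize the intersection, then walk it. */
	static void walk_allowed_old(struct sched_group *group, struct task_struct *p)
	{
		cpumask_t tmp;	/* NR_CPUS bits of stack: 512 bytes at NR_CPUS=4096 */
		int i;

		cpus_and(tmp, group->cpumask, p->cpus_allowed);
		for_each_cpu(i, &tmp)
			visit(i);
	}

	/* New shape: cpumask_next_and() advances through bits set in
	 * *both* masks, so no temporary is ever built.
	 */
	static void walk_allowed_new(struct sched_group *group, struct task_struct *p)
	{
		int i;

		for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed)
			visit(i);
	}
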
                update_shares(sd);
 
        while (sd) {
-               cpumask_t span, tmpmask;
                struct sched_group *group;
                int new_cpu, weight;
 
                        continue;
                }
 
-               span = sd->span;
                group = find_idlest_group(sd, t, cpu);
                if (!group) {
                        sd = sd->child;
                        continue;
                }
 
-               new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
+               new_cpu = find_idlest_cpu(group, t, cpu);
                if (new_cpu == -1 || new_cpu == cpu) {
                        /* Now try balancing at a lower domain level of cpu */
                        sd = sd->child;
 
                /* Now try balancing at a lower domain level of new_cpu */
                cpu = new_cpu;
+               weight = cpumask_weight(sched_domain_span(sd));
                sd = NULL;
-               weight = cpus_weight(span);
                for_each_domain(cpu, tmp) {
-                       if (weight <= cpus_weight(tmp->span))
+                       if (weight <= cpumask_weight(sched_domain_span(tmp)))
                                break;
                        if (tmp->flags & flag)
                                sd = tmp;
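
The subtlety in this hunk is ordering, not naming: the old code kept a full cpumask_t copy (span) on the stack for the whole iteration just so its weight could be read after sd moved on; the new code samples the weight, a plain int, immediately before sd is cleared for the downward re-walk. A condensed sketch of the resulting loop; for_each_domain() is sched.c-internal, so treat the helper as hypothetical and as living in that file:

	/* Sketch: sample the span weight while 'sd' still points at the
	 * domain we just balanced in, then hunt for a narrower domain
	 * with 'flag' set.
	 */
	static struct sched_domain *descend_from(struct sched_domain *sd,
						 int cpu, int flag)
	{
		struct sched_domain *tmp, *found = NULL;
		unsigned int weight;

		weight = cpumask_weight(sched_domain_span(sd));	/* before reuse */
		for_each_domain(cpu, tmp) {
			/* Stop once domains are at least as wide as the old one. */
			if (weight <= cpumask_weight(sched_domain_span(tmp)))
				break;
			if (tmp->flags & flag)
				found = tmp;
		}
		return found;
	}
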
                cpu = task_cpu(p);
 
                for_each_domain(this_cpu, sd) {
-                       if (cpu_isset(cpu, sd->span)) {
+                       if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                                update_shares(sd);
                                break;
                        }
        else {
                struct sched_domain *sd;
                for_each_domain(this_cpu, sd) {
-                       if (cpu_isset(cpu, sd->span)) {
+                       if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                                schedstat_inc(sd, ttwu_wake_remote);
                                break;
                        }
                unsigned long sum_avg_load_per_task;
                unsigned long avg_load_per_task;
 
-               local_group = cpu_isset(this_cpu, group->cpumask);
+               local_group = cpumask_test_cpu(this_cpu,
+                                              sched_group_cpus(group));
 
                if (local_group)
-                       balance_cpu = first_cpu(group->cpumask);
+                       balance_cpu = cpumask_first(sched_group_cpus(group));
 
                /* Tally up the load of all CPUs in the group */
                sum_weighted_load = sum_nr_running = avg_load = 0;
                max_cpu_load = 0;
                min_cpu_load = ~0UL;
 
-               for_each_cpu(i, &group->cpumask) {
-                       struct rq *rq;
-
-                       if (!cpu_isset(i, *cpus))
-                               continue;
-
-                       rq = cpu_rq(i);
+               for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+                       struct rq *rq = cpu_rq(i);
 
                        if (*sd_idle && rq->nr_running)
                                *sd_idle = 0;
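
Same iterator trick as in find_idlest_cpu(), applied to an open-coded filter this time: the cpu_isset()/continue dance folds into for_each_cpu_and(), and since cpus already arrives as a pointer it drops straight in. Illustration-only sketch (tally() is invented):

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	static void tally(int cpu) { }	/* invented accumulator, illustration only */

	/* Old shape: walk the whole group, skipping disallowed CPUs by hand. */
	static void tally_old(struct sched_group *group, const cpumask_t *cpus)
	{
		int i;

		for_each_cpu(i, &group->cpumask) {
			if (!cpu_isset(i, *cpus))
				continue;
			tally(i);
		}
	}

	/* New shape: the allowed-set filter is part of the iteration itself. */
	static void tally_new(struct sched_group *group, const struct cpumask *cpus)
	{
		int i;

		for_each_cpu_and(i, sched_group_cpus(group), cpus)
			tally(i);
	}
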
                 */
                if ((sum_nr_running < min_nr_running) ||
                    (sum_nr_running == min_nr_running &&
-                    first_cpu(group->cpumask) <
-                    first_cpu(group_min->cpumask))) {
+                    cpumask_first(sched_group_cpus(group)) <
+                    cpumask_first(sched_group_cpus(group_min)))) {
                        group_min = group;
                        min_nr_running = sum_nr_running;
                        min_load_per_task = sum_weighted_load /
                if (sum_nr_running <= group_capacity - 1) {
                        if (sum_nr_running > leader_nr_running ||
                            (sum_nr_running == leader_nr_running &&
-                            first_cpu(group->cpumask) >
-                             first_cpu(group_leader->cpumask))) {
+                            cpumask_first(sched_group_cpus(group)) >
+                            cpumask_first(sched_group_cpus(group_leader)))) {
                                group_leader = group;
                                leader_nr_running = sum_nr_running;
                        }
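
cpumask_first() is the drop-in for first_cpu(): the lowest set bit, or nr_cpu_ids for an empty mask. The tie-break it feeds is unchanged: on equal nr_running, group_min prefers the lower first-CPU and group_leader the higher, which keeps the two from settling on the same group. Roughly, for the min side (helper invented for illustration):

	#include <linux/cpumask.h>

	/* Tie-break sketch: with equal task counts, take the group whose
	 * lowest-numbered CPU is smaller. cpumask_first() returns
	 * nr_cpu_ids on an empty mask, so an empty candidate never wins.
	 */
	static bool better_min_group(struct sched_group *cand, struct sched_group *cur,
				     unsigned long cand_nr, unsigned long cur_nr)
	{
		if (cand_nr != cur_nr)
			return cand_nr < cur_nr;
		return cpumask_first(sched_group_cpus(cand)) <
		       cpumask_first(sched_group_cpus(cur));
	}
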
        unsigned long max_load = 0;
        int i;
 
-       for_each_cpu(i, &group->cpumask) {
+       for_each_cpu(i, sched_group_cpus(group)) {
                unsigned long wl;
 
                if (!cpu_isset(i, *cpus))
        /* Search for an sd spanning us and the target CPU. */
        for_each_domain(target_cpu, sd) {
                if ((sd->flags & SD_LOAD_BALANCE) &&
-                   cpu_isset(busiest_cpu, sd->span))
+                   cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
                                break;
        }
 
        struct sched_group *group = sd->groups;
        char str[256];
 
-       cpulist_scnprintf(str, sizeof(str), sd->span);
+       cpulist_scnprintf(str, sizeof(str), *sched_domain_span(sd));
        cpus_clear(*groupmask);
 
        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
 
        printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
-       if (!cpu_isset(cpu, sd->span)) {
+       if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                printk(KERN_ERR "ERROR: domain->span does not contain "
                                "CPU%d\n", cpu);
        }
-       if (!cpu_isset(cpu, group->cpumask)) {
+       if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
                printk(KERN_ERR "ERROR: domain->groups does not contain"
                                " CPU%d\n", cpu);
        }
                        break;
                }
 
-               if (!cpus_weight(group->cpumask)) {
+               if (!cpumask_weight(sched_group_cpus(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: empty group\n");
                        break;
                }
 
-               if (cpus_intersects(*groupmask, group->cpumask)) {
+               if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: repeated CPUs\n");
                        break;
                }
 
-               cpus_or(*groupmask, *groupmask, group->cpumask);
+               cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-               cpulist_scnprintf(str, sizeof(str), group->cpumask);
+               cpulist_scnprintf(str, sizeof(str), *sched_group_cpus(group));
                printk(KERN_CONT " %s", str);
 
                group = group->next;
        } while (group != sd->groups);
        printk(KERN_CONT "\n");
 
-       if (!cpus_equal(sd->span, *groupmask))
+       if (!cpumask_equal(sched_domain_span(sd), groupmask))
                printk(KERN_ERR "ERROR: groups don't span domain->span\n");
 
-       if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
+       if (sd->parent &&
+           !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
                printk(KERN_ERR "ERROR: parent span is not a superset "
                        "of domain->span\n");
        return 0;
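
One wrinkle in the debug path: cpulist_scnprintf() is not converted by this patch, and at this point in the tree (if I read it right) it is still a macro wanting a cpumask_t lvalue whose address it takes internally, hence the dereference of the accessors at the converted call sites. Sketch:

	#include <linux/cpumask.h>
	#include <linux/kernel.h>
	#include <linux/sched.h>

	/*
	 * Rough shape of the helper at this point in the series:
	 *
	 *	#define cpulist_scnprintf(buf, len, src) \
	 *		__cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
	 *
	 * i.e. it wants a cpumask_t lvalue it can take the address of,
	 * which is why the call sites dereference the span accessor.
	 */
	static void print_span(struct sched_domain *sd, int level)
	{
		char str[256];

		cpulist_scnprintf(str, sizeof(str), *sched_domain_span(sd));
		printk(KERN_DEBUG "%*s span %s\n", level, "", str);
	}
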
 
 static int sd_degenerate(struct sched_domain *sd)
 {
-       if (cpus_weight(sd->span) == 1)
+       if (cpumask_weight(sched_domain_span(sd)) == 1)
                return 1;
 
        /* Following flags need at least 2 groups */
        if (sd_degenerate(parent))
                return 1;
 
-       if (!cpus_equal(sd->span, parent->span))
+       if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
                return 0;
 
        /* Does parent contain flags not in child? */
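
cpumask_weight() is a popcount over the mask, so the degenerate test reads directly: a domain spanning one CPU has nobody to balance with. The parent check pairs with it: a parent spanning exactly the child's CPUs adds nothing unless it carries extra flags. A condensed sketch of just the span checks:

	#include <linux/cpumask.h>

	/* A single-CPU span has nothing to balance against. */
	static bool span_degenerate(struct sched_domain *sd)
	{
		return cpumask_weight(sched_domain_span(sd)) == 1;
	}

	/* A parent spanning exactly the child's CPUs is redundant,
	 * modulo the flag comparison that follows in the real code.
	 */
	static bool parent_span_redundant(struct sched_domain *sd,
					  struct sched_domain *parent)
	{
		return cpumask_equal(sched_domain_span(sd),
				     sched_domain_span(parent));
	}
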
                int group = group_fn(i, cpu_map, &sg, tmpmask);
                int j;
 
-               if (cpu_isset(i, *covered))
+               if (cpumask_test_cpu(i, covered))
                        continue;
 
-               cpus_clear(sg->cpumask);
+               cpumask_clear(sched_group_cpus(sg));
                sg->__cpu_power = 0;
 
                for_each_cpu(j, span) {
                                continue;
 
                        cpu_set(j, *covered);
-                       cpu_set(j, sg->cpumask);
+                       cpumask_set_cpu(j, sched_group_cpus(sg));
                }
                if (!first)
                        first = sg;
        if (!sg)
                return;
        do {
-               for_each_cpu(j, &sg->cpumask) {
+               for_each_cpu(j, sched_group_cpus(sg)) {
                        struct sched_domain *sd;
 
                        sd = &per_cpu(phys_domains, j);
-                       if (j != first_cpu(sd->groups->cpumask)) {
+                       if (j != cpumask_first(sched_group_cpus(sd->groups))) {
                                /*
                                 * Only add "power" once for each
                                 * physical package.
 
        WARN_ON(!sd || !sd->groups);
 
-       if (cpu != first_cpu(sd->groups->cpumask))
+       if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
                return;
 
        child = sd->child;
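
The cpumask_first() guard here is the usual "one CPU owns per-group state" idiom: group power is accumulated exactly once, by the group's lowest-numbered CPU, matching the per-package skip in the NUMA power accumulation just above. Shape of the idiom:

	/* Idiom: only the lowest-numbered CPU of a group performs the
	 * group-wide setup; every other CPU bails out early.
	 */
	static void group_onetime_setup(int cpu, struct sched_domain *sd)
	{
		if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
			return;	/* another CPU owns this group's state */

		/* one-time per-group initialization would run here */
	}
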
                        sd = &per_cpu(allnodes_domains, i);
                        SD_INIT(sd, ALLNODES);
                        set_domain_attribute(sd, attr);
-                       sd->span = *cpu_map;
+                       cpumask_copy(sched_domain_span(sd), cpu_map);
                        cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
                        p = sd;
                        sd_allnodes = 1;
                sd = &per_cpu(node_domains, i);
                SD_INIT(sd, NODE);
                set_domain_attribute(sd, attr);
-               sched_domain_node_span(cpu_to_node(i), &sd->span);
+               sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
                sd->parent = p;
                if (p)
                        p->child = sd;
-               cpus_and(sd->span, sd->span, *cpu_map);
+               cpumask_and(sched_domain_span(sd),
+                           sched_domain_span(sd), cpu_map);
 #endif
 
                p = sd;
                sd = &per_cpu(phys_domains, i);
                SD_INIT(sd, CPU);
                set_domain_attribute(sd, attr);
-               sd->span = *nodemask;
+               cpumask_copy(sched_domain_span(sd), nodemask);
                sd->parent = p;
                if (p)
                        p->child = sd;
                sd = &per_cpu(core_domains, i);
                SD_INIT(sd, MC);
                set_domain_attribute(sd, attr);
-               sd->span = cpu_coregroup_map(i);
-               cpus_and(sd->span, sd->span, *cpu_map);
+               *sched_domain_span(sd) = cpu_coregroup_map(i);
+               cpumask_and(sched_domain_span(sd),
+                           sched_domain_span(sd), cpu_map);
                sd->parent = p;
                p->child = sd;
                cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
                sd = &per_cpu(cpu_domains, i);
                SD_INIT(sd, SIBLING);
                set_domain_attribute(sd, attr);
-               sd->span = per_cpu(cpu_sibling_map, i);
-               cpus_and(sd->span, sd->span, *cpu_map);
+               cpumask_and(sched_domain_span(sd),
+                           &per_cpu(cpu_sibling_map, i), cpu_map);
                sd->parent = p;
                p->child = sd;
                cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
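
Two idioms coexist in the span setup above. Where the source mask is addressable (nodemask, cpu_map, the per-CPU cpu_sibling_map), cpumask_copy()/cpumask_and() work on pointers directly; the MC case keeps a structure assignment because cpu_coregroup_map() still returns a cpumask_t by value, leaving nothing to point at until that helper grows a pointer-returning variant. Side by side (assuming this file's existing declarations):

	static void set_spans(struct sched_domain *smt_sd, struct sched_domain *mc_sd,
			      const struct cpumask *cpu_map, int i)
	{
		/* Addressable source: mask straight into the span, no copy. */
		cpumask_and(sched_domain_span(smt_sd),
			    &per_cpu(cpu_sibling_map, i), cpu_map);

		/* Value-returning source: struct assignment first, then
		 * mask the span in place.
		 */
		*sched_domain_span(mc_sd) = cpu_coregroup_map(i);
		cpumask_and(sched_domain_span(mc_sd),
			    sched_domain_span(mc_sd), cpu_map);
	}
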
                        sd->groups = sg;
                }
                sg->__cpu_power = 0;
-               sg->cpumask = *nodemask;
+               cpumask_copy(sched_group_cpus(sg), nodemask);
                sg->next = sg;
                cpus_or(*covered, *covered, *nodemask);
                prev = sg;
                                goto error;
                        }
                        sg->__cpu_power = 0;
-                       sg->cpumask = *tmpmask;
+                       cpumask_copy(sched_group_cpus(sg), tmpmask);
                        sg->next = prev->next;
                        cpus_or(*covered, *covered, *tmpmask);
                        prev->next = sg;