 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static cpumask_t downed_cpus;
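+/*
+ * Mask of CPUs taken offline by enter_uniprocessor() and brought back
+ * by leave_uniprocessor().
+ */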
+static cpumask_var_t downed_cpus;
 
 static void enter_uniprocessor(void)
 {
        int cpu;
        int err;
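+       /*
+        * With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer and
+        * must be allocated before first use; without it, alloc_cpumask_var()
+        * is a no-op that always succeeds.
+        */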
 
+       if (downed_cpus == NULL &&
+           !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
+               pr_notice(NAME "Failed to allocate mask\n");
+               goto out;
+       }
+
        get_online_cpus();
-       downed_cpus = cpu_online_map;
-       cpu_clear(first_cpu(cpu_online_map), downed_cpus);
+       cpumask_copy(downed_cpus, cpu_online_mask);
+       cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
        if (num_online_cpus() > 1)
                pr_notice(NAME "Disabling non-boot CPUs...\n");
        put_online_cpus();
 
-       for_each_cpu_mask(cpu, downed_cpus) {
+       for_each_cpu(cpu, downed_cpus) {
                err = cpu_down(cpu);
                if (!err)
                        pr_info(NAME "CPU%d is down.\n", cpu);
                else
                        pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err);
        }
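+       /*
+        * Reached via goto when the cpumask allocation fails, and by
+        * fall-through otherwise; either way, warn if several CPUs are
+        * still online, since events on them would be missed.
+        */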
+out:
        if (num_online_cpus() > 1)
                pr_warning(NAME "multiple CPUs still online, "
                                                "may miss events.\n");
 }
 
 static void leave_uniprocessor(void)
 {
        int cpu;
        int err;
 
-       if (cpus_weight(downed_cpus) == 0)
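+       /*
+        * Nothing to restore if the mask was never allocated or no CPU
+        * was actually taken down.
+        */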
+       if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
                return;
        pr_notice(NAME "Re-enabling CPUs...\n");
-       for_each_cpu_mask(cpu, downed_cpus) {
+       for_each_cpu(cpu, downed_cpus) {
                err = cpu_up(cpu);
                if (!err)
                        pr_info(NAME "enabled CPU%d.\n", cpu);