cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr
author    Mike Travis <travis@sgi.com>
          Tue, 15 Jul 2008 21:14:30 +0000 (14:14 -0700)
committer Ingo Molnar <mingo@elte.hu>
          Fri, 18 Jul 2008 20:02:57 +0000 (22:02 +0200)
  * This patch replaces the dangerous lvalue version of cpumask_of_cpu
    with new cpumask_of_cpu_ptr macros.  These are patterned after the
    node_to_cpumask_ptr macros.

    In general terms, if there is a cpumask_of_cpu_map[], then a pointer to
    the cpumask_of_cpu_map[cpu] entry is used.  The cpumask_of_cpu_map is
    provided when NR_CPUS is large, greatly reducing the amount of code
    generated and stack space used for cpumask_of_cpu().  The pointer to
    the cpumask_t value is what set_cpus_allowed_ptr() takes, so the full
    cpumask_t value never has to be passed on the stack.
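
    As a rough sketch (assuming CONFIG_HAVE_CPUMASK_OF_CPU_MAP, and using
    the macro definitions added to include/linux/cpumask.h below), a line
    such as cpumask_of_cpu_ptr(new_mask, cpu) boils down to a plain pointer
    into the map:

	/* sketch of the expansion when cpumask_of_cpu_map[] exists */
	const cpumask_t *new_mask = &cpumask_of_cpu_map[cpu];

	set_cpus_allowed_ptr(current, new_mask);	/* no cpumask_t copy on the stack */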

    If there isn't a cpumask_of_cpu_map[], then a temporary cpumask_t
    variable is declared and filled in with the value of cpumask_of_cpu(cpu),
    along with a pointer variable pointing to this temporary.  Afterwards,
    the pointer is used to reference the cpumask value.  The compiler
    optimizes out both the extra dereference through the pointer and the
    stack space used for the pointer, resulting in identical code.
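
    In that case, a rough sketch of the same cpumask_of_cpu_ptr(new_mask, cpu)
    line (per the !CONFIG_HAVE_CPUMASK_OF_CPU_MAP definitions below) is:

	/* sketch of the expansion without cpumask_of_cpu_map[] */
	cpumask_t _new_mask = cpumask_of_cpu(cpu);	/* on-stack temporary */
	const cpumask_t *new_mask = &_new_mask;		/* pointer the caller actually uses */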

    A good example of the two usages side by side is in net/sunrpc/svc.c:

	case SVC_POOL_PERCPU:
	{
		unsigned int cpu = m->pool_to[pidx];
		cpumask_of_cpu_ptr(cpumask, cpu);

		*oldmask = current->cpus_allowed;
		set_cpus_allowed_ptr(current, cpumask);
		return 1;
	}
	case SVC_POOL_PERNODE:
	{
		unsigned int node = m->pool_to[pidx];
		node_to_cpumask_ptr(nodecpumask, node);

		*oldmask = current->cpus_allowed;
		set_cpus_allowed_ptr(current, nodecpumask);
		return 1;
	}
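
    For loops that migrate across several cpus, the pointer is declared once
    with cpumask_of_cpu_ptr_declare() and re-pointed each iteration with
    cpumask_of_cpu_ptr_next(); a condensed sketch of drv_write() from
    arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c (as changed below):

	cpumask_of_cpu_ptr_declare(cpu_mask);
	unsigned int i;

	for_each_cpu_mask_nr(i, cmd->mask) {
		cpumask_of_cpu_ptr_next(cpu_mask, i);
		set_cpus_allowed_ptr(current, cpu_mask);
		do_drv_write(cmd);
	}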

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
14 files changed:
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/microcode.c
arch/x86/kernel/reboot.c
drivers/acpi/processor_throttling.c
drivers/firmware/dcdbas.c
include/linux/cpumask.h
kernel/stop_machine.c
kernel/trace/trace_sysprof.c
net/sunrpc/svc.c

diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index c2502eb9aa8355488a7057602bfdfa71134785a7..9220cf46aa10645a50ceb65e8ca24c69c9841958 100644
@@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr(new_mask, cpu);
        int retval;
        unsigned int eax, ebx, ecx, edx;
        unsigned int edx_part;
@@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 
        /* Make sure we are running on right CPU */
        saved_mask = current->cpus_allowed;
-       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       retval = set_cpus_allowed_ptr(current, new_mask);
        if (retval)
                return -1;
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index dd097b835839dd73d4139d807c23e8fca1648344..ff2fff56f0a8f2f1340a98a3305da24cd1dd3da1 100644
@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
        cpumask_t saved_mask = current->cpus_allowed;
+       cpumask_of_cpu_ptr_declare(cpu_mask);
        unsigned int i;
 
        for_each_cpu_mask_nr(i, cmd->mask) {
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+               cpumask_of_cpu_ptr_next(cpu_mask, i);
+               set_cpus_allowed_ptr(current, cpu_mask);
                do_drv_write(cmd);
        }
 
@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu)
        } aperf_cur, mperf_cur;
 
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr(cpu_mask, cpu);
        unsigned int perf_percent;
        unsigned int retval;
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, cpu_mask);
        if (get_cpu() != cpu) {
                /* We were not able to run on requested processor */
                put_cpu();
@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
+       cpumask_of_cpu_ptr(cpu_mask, cpu);
        struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
        unsigned int freq;
        unsigned int cached_freq;
@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
        }
 
        cached_freq = data->freq_table[data->acpi_data->state].frequency;
-       freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+       freq = extract_freq(get_cur_val(cpu_mask), data);
        if (freq != cached_freq) {
                /*
                 * The dreaded BIOS frequency change behind our back.
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c45ca6d4dce101fbe7e5044420410edab5ad8b42..53c7b6936973c6da85137145ed0eea8eec18746f 100644
@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
        cpumask_t oldmask;
+       cpumask_of_cpu_ptr(cpu_mask, cpu);
        u32 eax, ebx, ecx, edx;
        unsigned int rc = 0;
 
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, cpu_mask);
 
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
        cpumask_t oldmask;
+       cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
        u32 checkfid;
        u32 checkvid;
@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+       set_cpus_allowed_ptr(current, cpu_mask);
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
        struct powernow_k8_data *data;
        cpumask_t oldmask;
+       cpumask_of_cpu_ptr_declare(newmask);
        int rc;
 
        if (!cpu_online(pol->cpu))
@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+       cpumask_of_cpu_ptr_next(newmask, pol->cpu);
+       set_cpus_allowed_ptr(current, newmask);
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        set_cpus_allowed_ptr(current, &oldmask);
 
        if (cpu_family == CPU_HW_PSTATE)
-               pol->cpus = cpumask_of_cpu(pol->cpu);
+               pol->cpus = *newmask;
        else
                pol->cpus = per_cpu(cpu_core_map, pol->cpu);
        data->available_cores = &(pol->cpus);
@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
        struct powernow_k8_data *data;
        cpumask_t oldmask = current->cpus_allowed;
+       cpumask_of_cpu_ptr(newmask, cpu);
        unsigned int khz = 0;
        unsigned int first;
 
@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
        if (!data)
                return -EINVAL;
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, newmask);
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX
                        "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 8b0dd6f2a1acd101aea9c9cca165ab67c3f0b19a..fd561bb26c6088d57a44a21728a367b96370f770 100644
@@ -313,9 +313,10 @@ static unsigned int get_cur_freq(unsigned int cpu)
        unsigned l, h;
        unsigned clock_freq;
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr(new_mask, cpu);
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, new_mask);
        if (smp_processor_id() != cpu)
                return 0;
 
@@ -554,9 +555,11 @@ static int centrino_target (struct cpufreq_policy *policy,
                 */
 
                if (!cpus_empty(covered_cpus)) {
+                       cpumask_of_cpu_ptr_declare(new_mask);
+
                        for_each_cpu_mask_nr(j, covered_cpus) {
-                               set_cpus_allowed_ptr(current,
-                                                    &cpumask_of_cpu(j));
+                               cpumask_of_cpu_ptr_next(new_mask, j);
+                               set_cpus_allowed_ptr(current, new_mask);
                                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
                        }
                }
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 191f7263c61dce1b42864cd4709e249773339243..2f3728dc24f60cc41db81e0066927a4ff15f409f 100644
@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-       return _speedstep_get(&cpumask_of_cpu(cpu));
+       cpumask_of_cpu_ptr(newmask, cpu);
+       return _speedstep_get(newmask);
 }
 
 /**
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a7b0f8f1736bd72e0ef73113d26adc2c46cdf6b1..e4b8d189d7edacf0ac60d2dd636ad54100ce7fdc 100644
@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
        unsigned long           j;
        int                     retval;
        cpumask_t               oldmask;
+       cpumask_of_cpu_ptr(newmask, cpu);
 
        if (num_cache_leaves == 0)
                return -ENOENT;
@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
                return -ENOMEM;
 
        oldmask = current->cpus_allowed;
-       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       retval = set_cpus_allowed_ptr(current, newmask);
        if (retval)
                goto out;
 
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 56b933119a04f5551094ce08feeb12bf2642aa6b..58520169e35dbbdfd7613a4648100197fbca7140 100644
@@ -388,6 +388,7 @@ static int do_microcode_update (void)
        void *new_mc = NULL;
        int cpu;
        cpumask_t old;
+       cpumask_of_cpu_ptr_declare(newmask);
 
        old = current->cpus_allowed;
 
@@ -404,7 +405,8 @@ static int do_microcode_update (void)
 
                        if (!uci->valid)
                                continue;
-                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+                       cpumask_of_cpu_ptr_next(newmask, cpu);
+                       set_cpus_allowed_ptr(current, newmask);
                        error = get_maching_microcode(new_mc, cpu);
                        if (error < 0)
                                goto out;
@@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu)
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        cpumask_t old;
+       cpumask_of_cpu_ptr(newmask, cpu);
        unsigned int val[2];
        int err = 0;
 
@@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu)
                return 0;
 
        old = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, newmask);
 
        /* Check if the microcode we have in memory matches the CPU */
        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu)
 static void microcode_init_cpu(int cpu, int resume)
 {
        cpumask_t old;
+       cpumask_of_cpu_ptr(newmask, cpu);
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
        old = current->cpus_allowed;
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, newmask);
        mutex_lock(&microcode_mutex);
        collect_cpu_info(cpu);
        if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -656,11 +660,12 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
                return -EINVAL;
        if (val == 1) {
                cpumask_t old;
+               cpumask_of_cpu_ptr(newmask, cpu);
 
                old = current->cpus_allowed;
 
                get_online_cpus();
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+               set_cpus_allowed_ptr(current, newmask);
 
                mutex_lock(&microcode_mutex);
                if (uci->valid)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f8a62160e1513221a379a400cfd30dc6e3eaaf0b..214bbdfc851e274bd616fa1a981920ee61f76179 100644
@@ -403,24 +403,28 @@ void native_machine_shutdown(void)
 {
        /* Stop the cpus and apics */
 #ifdef CONFIG_SMP
-       int reboot_cpu_id;
 
        /* The boot cpu is always logical cpu 0 */
-       reboot_cpu_id = 0;
+       int reboot_cpu_id = 0;
+       cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 
 #ifdef CONFIG_X86_32
        /* See if there has been given a command line override */
        if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-               cpu_online(reboot_cpu))
+               cpu_online(reboot_cpu)) {
                reboot_cpu_id = reboot_cpu;
+               cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
+       }
 #endif
 
        /* Make certain the cpu I'm about to reboot on is online */
-       if (!cpu_online(reboot_cpu_id))
+       if (!cpu_online(reboot_cpu_id)) {
                reboot_cpu_id = smp_processor_id();
+               cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
+       }
 
        /* Make certain I only run on the appropriate processor */
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
+       set_cpus_allowed_ptr(current, newmask);
 
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a56fc6c4394bb1dab26cb06527d3f89239333683..a2c3f9cfa5490fe5fef15c4c0e48676ad22f837a 100644
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr_declare(new_mask);
        int ret;
 
        if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
         * Migrate task to the cpu pointed by pr.
         */
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+       cpumask_of_cpu_ptr_next(new_mask, pr->id);
+       set_cpus_allowed_ptr(current, new_mask);
        ret = pr->throttling.acpi_processor_get_throttling(pr);
        /* restore the previous state */
        set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr_declare(new_mask);
        int ret = 0;
        unsigned int i;
        struct acpi_processor *match_pr;
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
         * it can be called only for the cpu pointed by pr.
         */
        if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+               cpumask_of_cpu_ptr_next(new_mask, pr->id);
+               set_cpus_allowed_ptr(current, new_mask);
                ret = p_throttling->acpi_processor_set_throttling(pr,
                                                t_state.target_state);
        } else {
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                                continue;
                        }
                        t_state.cpu = i;
-                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+                       cpumask_of_cpu_ptr_next(new_mask, i);
+                       set_cpus_allowed_ptr(current, new_mask);
                        ret = match_pr->throttling.
                                acpi_processor_set_throttling(
                                match_pr, t_state.target_state);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7dfd0fe73884ff86101ed90c04346e00cb..0b624e927a6fea8c4306949b85d28e0b5b980417 100644
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
 static int smi_request(struct smi_cmd *smi_cmd)
 {
        cpumask_t old_mask;
+       cpumask_of_cpu_ptr(new_mask, 0);
        int ret = 0;
 
        if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
 
        /* SMI requires CPU 0 */
        old_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+       set_cpus_allowed_ptr(current, new_mask);
        if (smp_processor_id() != 0) {
                dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                        __func__);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 80226e776143242778708cfa682a2c0d58b38d3a..2dbd9a287e77473942372bfd25bccf76299e45a7 100644
  * int next_cpu_nr(cpu, mask)          Next cpu past 'cpu', or nr_cpu_ids
  *
  * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
+ *ifdef CONFIG_HAS_CPUMASK_OF_CPU
+ * cpumask_of_cpu_ptr_declare(v)       Declares cpumask_t *v
+ * cpumask_of_cpu_ptr_next(v, cpu)     Sets v = &cpumask_of_cpu_map[cpu]
+ * cpumask_of_cpu_ptr(v, cpu)          Combines above two operations
+ *else
+ * cpumask_of_cpu_ptr_declare(v)       Declares cpumask_t _v and *v = &_v
+ * cpumask_of_cpu_ptr_next(v, cpu)     Sets _v = cpumask_of_cpu(cpu)
+ * cpumask_of_cpu_ptr(v, cpu)          Combines above two operations
+ *endif
  * CPU_MASK_ALL                                Initializer - all bits set
  * CPU_MASK_NONE                       Initializer - no bits set
  * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
@@ -236,11 +245,16 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 
 #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
 extern cpumask_t *cpumask_of_cpu_map;
-#define cpumask_of_cpu(cpu)    (cpumask_of_cpu_map[cpu])
-
+#define cpumask_of_cpu(cpu)    (cpumask_of_cpu_map[cpu])
+#define        cpumask_of_cpu_ptr(v, cpu)                                      \
+               const cpumask_t *v = &cpumask_of_cpu(cpu)
+#define        cpumask_of_cpu_ptr_declare(v)                                   \
+               const cpumask_t *v
+#define cpumask_of_cpu_ptr_next(v, cpu)                                        \
+                                       v = &cpumask_of_cpu(cpu)
 #else
 #define cpumask_of_cpu(cpu)                                            \
-(*({                                                                   \
+({                                                                     \
        typeof(_unused_cpumask_arg_) m;                                 \
        if (sizeof(m) == sizeof(unsigned long)) {                       \
                m.bits[0] = 1UL<<(cpu);                                 \
@@ -248,8 +262,16 @@ extern cpumask_t *cpumask_of_cpu_map;
                cpus_clear(m);                                          \
                cpu_set((cpu), m);                                      \
        }                                                               \
-       &m;                                                             \
-}))
+       m;                                                              \
+})
+#define        cpumask_of_cpu_ptr(v, cpu)                                      \
+               cpumask_t _##v = cpumask_of_cpu(cpu);                   \
+               const cpumask_t *v = &_##v
+#define        cpumask_of_cpu_ptr_declare(v)                                   \
+               cpumask_t _##v;                                         \
+               const cpumask_t *v = &_##v
+#define cpumask_of_cpu_ptr_next(v, cpu)                                        \
+                                       _##v = cpumask_of_cpu(cpu)
 #endif
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ba9b2054ecbdb99bbb868b4f9c01af0bb2ce2c30..738b411ff2d33dee86042404917640bcb2932ed5 100644
@@ -33,8 +33,9 @@ static int stopmachine(void *cpu)
 {
        int irqs_disabled = 0;
        int prepared = 0;
+       cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
+       set_cpus_allowed_ptr(current, cpumask);
 
        /* Ack: we are alive */
        smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 2301e1e7c60648d7376f430b32e771257973be8a..63528086337c0145b6e59e4a77c9e80b40686408 100644
@@ -213,7 +213,9 @@ static void start_stack_timers(void)
        int cpu;
 
        for_each_online_cpu(cpu) {
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+               cpumask_of_cpu_ptr(new_mask, cpu);
+
+               set_cpus_allowed_ptr(current, new_mask);
                start_stack_timer(cpu);
        }
        set_cpus_allowed_ptr(current, &saved_mask);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index d43cf8ddff67c13bd0646c728b618f2b39863fc3..083d12688134504d437ea1cade1ea7c41d36ce59 100644
@@ -314,9 +314,10 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
        case SVC_POOL_PERCPU:
        {
                unsigned int cpu = m->pool_to[pidx];
+               cpumask_of_cpu_ptr(cpumask, cpu);
 
                *oldmask = current->cpus_allowed;
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+               set_cpus_allowed_ptr(current, cpumask);
                return 1;
        }
        case SVC_POOL_PERNODE: