* sizeof(struct ebt_chainstack));
                if (!newinfo->chainstack)
                        return -ENOMEM;
-               for_each_cpu(i) {
+               for_each_possible_cpu(i) {
                        newinfo->chainstack[i] =
                           vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
                        if (!newinfo->chainstack[i]) {
               sizeof(struct ebt_counter) * nentries);
 
        /* add other counters to those of cpu 0 */
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (cpu == 0)
                        continue;
                counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 
        vfree(table->entries);
        if (table->chainstack) {
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        vfree(table->chainstack[i]);
                vfree(table->chainstack);
        }
        vfree(counterstmp);
        /* can be initialized in translate_table() */
        if (newinfo->chainstack) {
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        vfree(newinfo->chainstack[i]);
                vfree(newinfo->chainstack);
        }
        mutex_unlock(&ebt_mutex);
 free_chainstack:
        if (newinfo->chainstack) {
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        vfree(newinfo->chainstack[i]);
                vfree(newinfo->chainstack);
        }
        mutex_unlock(&ebt_mutex);
        vfree(table->private->entries);
        if (table->private->chainstack) {
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        vfree(table->private->chainstack[i]);
                vfree(table->private->chainstack);
        }
 
         *      Initialise the packet receive queues.
         */
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                struct softnet_data *queue;
 
                queue = &per_cpu(softnet_data, i);
 
 {
        int i;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                flow_hash_rnd_recalc(i) = 1;
 
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                flow_cache_cpu_prepare(i);
 
        hotcpu_notifier(flow_cache_cpu, 0);
 
 
                memset(&ndst, 0, sizeof(ndst));
 
-               for_each_cpu(cpu) {
+               for_each_possible_cpu(cpu) {
                        struct neigh_statistics *st;
 
                        st = per_cpu_ptr(tbl->stats, cpu);
 
 {
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
                __net_srandom(state, i+jiffies);
        }
        unsigned long seed[NR_CPUS];
 
        get_random_bytes(seed, sizeof(seed));
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
                __net_srandom(state, seed[i]);
        }
 
        struct inet_sock *inet;
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                int err;
 
                err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
 
        if (!scratches)
                return;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                void *scratch = *per_cpu_ptr(scratches, i);
                if (scratch)
                        vfree(scratch);
 
        ipcomp_scratches = scratches;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
                if (!scratch)
                        return NULL;
        if (!tfms)
                return;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
                crypto_free_tfm(tfm);
        }
        if (!tfms)
                goto error;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
                if (!tfm)
                        goto error;
 
        }
 
        /* And one copy for every other CPU */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }
                           counters,
                           &i);
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
 
        struct ip_conntrack_ecache *ecache;
        int cpu;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ecache = &per_cpu(ip_conntrack_ecache, cpu);
                if (ecache->ct)
                        ip_conntrack_put(ecache->ct);
 
        }
 
        /* And one copy for every other CPU */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }
                          counters,
                          &i);
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
 
        int res = 0;
        int cpu;
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                res += proto->stats[cpu].inuse;
 
        return res;
        unsigned long res = 0;
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
                res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
        }
 
                memcpy(dst, src, length);
 
                /* Add the other cpus in, one int at a time */
-               for_each_cpu(i) {
+               for_each_possible_cpu(i) {
                        unsigned int j;
 
                        src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
 
        struct sock *sk;
        int err, i, j;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
                                       &per_cpu(__icmpv6_socket, i));
                if (err < 0) {
 {
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                sock_release(per_cpu(__icmpv6_socket, i));
        }
        inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
 
        if (!scratches)
                return;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                void *scratch = *per_cpu_ptr(scratches, i);
 
                vfree(scratch);
 
        ipcomp6_scratches = scratches;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
                if (!scratch)
                        return NULL;
        if (!tfms)
                return;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
                crypto_free_tfm(tfm);
        }
        if (!tfms)
                goto error;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
                if (!tfm)
                        goto error;
 
        }
 
        /* And one copy for every other CPU */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }
                           counters,
                           &i);
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
 
        int res = 0;
        int cpu;
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                res += proto->stats[cpu].inuse;
 
        return res;
         unsigned long res = 0;
         int i;
  
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
                 res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
                 res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
         }
 
        struct nf_conntrack_ecache *ecache;
        int cpu;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ecache = &per_cpu(nf_conntrack_ecache, cpu);
                if (ecache->ct)
                        nf_ct_put(ecache->ct);
 
 
        newinfo->size = size;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (size <= PAGE_SIZE)
                        newinfo->entries[cpu] = kmalloc_node(size,
                                                        GFP_KERNEL,
 {
        int cpu;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (info->size <= PAGE_SIZE)
                        kfree(info->entries[cpu]);
                else
 
        unsigned long res = 0;
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                res +=
                    *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
                                         sizeof (unsigned long) * nr));
 
        int cpu;
        int counter = 0;
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                counter += per_cpu(sockets_in_use, cpu);
 
        /* It can be negative, by the way. 8) */
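For reference, a minimal kernel-style sketch of the idiom these hunks converge on; the per-CPU variable example_hits and the helper example_fold_hits are invented for illustration and are not part of the patch. for_each_possible_cpu() walks every CPU that may ever be brought online, which matches per-CPU state allocated for all possible CPUs (counters, scratch buffers, per-CPU sockets), whereas for_each_online_cpu() would skip currently-offline CPUs that still own such state.

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, example_hits);

/*
 * Fold the per-CPU counter into a single total, the same pattern the
 * fold_field() and fold_prot_inuse() hunks above use for the real
 * statistics: iterate every possible CPU, online or not.
 */
static unsigned long example_fold_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(example_hits, cpu);

	return sum;
}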