static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-       unsigned cpu = smp_processor_id();
-       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
+       if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
+               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
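
(For reference: the per-cpu object both forms touch, and the accessor pattern the patch switches to, roughly as they appear in the 32-bit x86 headers of this era. The definitions below are a sketch from memory, not part of this patch.)

    /* Sketch only: per-CPU TLB state used by enter_lazy_tlb() above. */
    struct tlb_state {
            struct mm_struct *active_mm;    /* mm this CPU's TLB currently maps */
            int state;                      /* TLBSTATE_OK or TLBSTATE_LAZY     */
    };
    DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);

    /* Sketch only: x86_{read,write}_percpu() reach the current CPU's copy
     * through the %fs-based per-cpu segment in a single mov, which is why
     * the explicit smp_processor_id()/per_cpu() pair removed above is no
     * longer needed. */
    #define x86_read_percpu(var)       percpu_from_op("mov", per_cpu__##var)
    #define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)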
 
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
-               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-               per_cpu(cpu_tlbstate, cpu).active_mm = next;
+               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
+               x86_write_percpu(cpu_tlbstate.active_mm, next);
 #endif
                cpu_set(cpu, next->cpu_vm_mask);
 
        }
 #ifdef CONFIG_SMP
        else {
-               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-               BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
+               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
 
                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
 
  */
 void leave_mm(int cpu)
 {
-       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-               BUG();
-       cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
+       BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
+       cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
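
(Side note: the if (...) BUG(); to BUG_ON(...) rewrite in leave_mm() is behaviourally equivalent; BUG_ON() is just the idiomatic spelling, and the substantive change is only the per-cpu access. A minimal equivalence sketch, not part of the patch:)

    /* old form */
    if (cond)
            BUG();
    /* new form: same effect, reads as an assertion */
    BUG_ON(cond);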
                 * BUG();
                 */
 
-       if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-               if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
+       if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
+               if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
        unsigned long cpu = smp_processor_id();
 
        __flush_tlb_all();
-       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
+       if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(cpu);
 }
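
(For context, the two cpu_tlbstate.state values tested throughout these hunks. The numeric values are as recalled from the 32-bit tlbflush header; treat them as illustrative rather than authoritative.)

    #define TLBSTATE_OK     1       /* CPU is actively running ->active_mm and
                                     * must service flush IPIs normally.      */
    #define TLBSTATE_LAZY   2       /* CPU holds ->active_mm only lazily; on a
                                     * flush it calls leave_mm() and switches
                                     * to swapper_pg_dir instead of flushing. */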