unsigned long segment_base(u16 selector);
 
+void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+
 static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
 
        return (slot) ? gfn_to_page(slot, gfn) : NULL;
 }
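
An aside on the lookup used above: gfn_to_memslot() finds the memory slot
whose guest-frame range covers gfn, and gfn_to_page() indexes that slot's
page array. A minimal userspace model of the slot lookup, assuming the
usual [base_gfn, base_gfn + npages) slot layout (base_gfn and npages are
real kvm_memory_slot fields; everything else here is illustrative):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t gfn_t;

struct memslot {
        gfn_t base_gfn;         /* first guest frame the slot covers */
        uint64_t npages;        /* number of frames in the slot */
};

static struct memslot *find_memslot(struct memslot *slots, size_t n, gfn_t gfn)
{
        for (size_t i = 0; i < n; i++)
                if (gfn >= slots[i].base_gfn &&
                    gfn - slots[i].base_gfn < slots[i].npages)
                        return &slots[i];
        return NULL;    /* gfn not backed by RAM, e.g. an MMIO hole */
}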
 
+static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+                              unsigned long val, int bytes)
+{
+       struct kvm_memory_slot *m;
+       struct page *page;
+       void *virt;
+
+       if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
+               return 0;
+       m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
+       if (!m)
+               return 0;
+       page = gfn_to_page(m, gpa >> PAGE_SHIFT);
+       kvm_mmu_pre_write(vcpu, gpa, bytes);
+       virt = kmap_atomic(page, KM_USER0);
+       memcpy(virt + offset_in_page(gpa), &val, bytes);
+       kunmap_atomic(virt, KM_USER0);
+       kvm_mmu_post_write(vcpu, gpa, bytes);
+       return 1;
+}
+
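
The guard at the top of emulator_write_phys() deserves a note: the write is
committed directly only when it does not cross a page boundary, since the
two halves could land in different (or missing) slots; page-crossing writes
fall back to the MMIO path below. A standalone sketch of the check with two
worked cases, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

/* Returns 1 when a write of `bytes` bytes at `gpa` stays in one page. */
static int write_fits_in_page(uint64_t gpa, int bytes)
{
        return ((gpa + bytes - 1) >> PAGE_SHIFT) == (gpa >> PAGE_SHIFT);
}

int main(void)
{
        printf("%d\n", write_fits_in_page(0xffc, 4)); /* ends at 0xfff: 1 */
        printf("%d\n", write_fits_in_page(0xffd, 4)); /* ends at 0x1000: 0 */
        return 0;
}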
 static int emulator_write_emulated(unsigned long addr,
                                   unsigned long val,
                                   unsigned int bytes,
                                   struct x86_emulate_ctxt *ctxt)
 {
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;
 
+       if (emulator_write_phys(vcpu, gpa, val, bytes))
+               return X86EMUL_CONTINUE;
+
        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
        vcpu->mmio_is_write = 1;
        memcpy(vcpu->mmio_data, &val, bytes);
 
        return X86EMUL_CONTINUE;
 }
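
When emulator_write_phys() declines (a page-crossing write, or a gpa outside
every slot), the write is recorded in the vcpu as pending MMIO and the vcpu
exits to userspace, which emulates the device access. A hedged sketch of the
userspace side, using the KVM_RUN/KVM_EXIT_MMIO interface; handle_mmio_write()
is a hypothetical device-model callback, not part of this patch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical device-model callback, not part of this patch. */
void handle_mmio_write(uint64_t phys_addr, const void *data, uint32_t len);

/* run points at the mmap()ed struct kvm_run of the vcpu. */
void vcpu_loop(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                ioctl(vcpu_fd, KVM_RUN, 0);
                if (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write)
                        handle_mmio_write(run->mmio.phys_addr,
                                          run->mmio.data, run->mmio.len);
                /* other exit reasons elided */
        }
}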
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 {
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
 }
 
+void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+{
+       pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+}
+
+void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+{
+}
+
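
kvm_mmu_pre_write()/kvm_mmu_post_write() are deliberately empty stubs for
now; pgprintk() merely traces the write. They give the MMU a hook before and
after every emulated write to guest memory, bracketing the memcpy as
emulator_write_phys() shows above. As a purely illustrative toy model of what
a later implementation might do (none of these names are from KVM): invalidate
cached shadow translations covering the written range, so they are rebuilt
from the modified guest tables on the next fault.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Toy model: one cached translation per guest frame. */
struct shadow_entry {
        uint64_t gfn;
        bool valid;
};

/* Illustrative pre-write hook: drop entries overlapping the write. */
static void toy_pre_write(struct shadow_entry *tbl, size_t n,
                          uint64_t gpa, int bytes)
{
        uint64_t first = gpa >> PAGE_SHIFT;
        uint64_t last = (gpa + bytes - 1) >> PAGE_SHIFT;

        for (size_t i = 0; i < n; i++)
                if (tbl[i].gfn >= first && tbl[i].gfn <= last)
                        tbl[i].valid = false;
}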
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
        while (!list_empty(&vcpu->free_pages)) {