                                              unsigned level,
                                             int metaphysical,
                                             unsigned access,
-                                            u64 *parent_pte,
-                                            bool *new_page)
+                                            u64 *parent_pte)
 {
        union kvm_mmu_page_role role;
        unsigned index;
        vcpu->arch.mmu.prefetch_page(vcpu, sp);
        if (!metaphysical)
                rmap_write_protect(vcpu->kvm, gfn);
-       if (new_page)
-               *new_page = 1;
        return sp;
 }
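
With the new_page out-parameter gone, kvm_mmu_get_page() no longer tells its caller whether it allocated a fresh shadow page or returned a cached one, so each call site below is updated to cope without that hint. For reference, a sketch of the resulting prototype; the leading parameters are elided from the hunk and inferred from the call sites (they pass the vcpu, a gfn, and a guest address), so treat their exact names as an assumption:

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
                                             unsigned access,
                                             u64 *parent_pte);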
 
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
-                                                    1, ACC_ALL, &table[index],
-                                                    NULL);
+                                                    1, ACC_ALL, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                kvm_release_page_clean(page);
 
                ASSERT(!VALID_PAGE(root));
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.root_hpa = root;
                        root_gfn = 0;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                     ACC_ALL, NULL, NULL);
+                                     ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
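
The call sites above (nonpaging_map and the two root-allocation paths) all passed NULL for new_page, so dropping the trailing argument there is purely mechanical and changes no behavior.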
 
                u64 shadow_pte;
                int metaphysical;
                gfn_t table_gfn;
-               bool new_page = 0;
 
                shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                if (level == PT_PAGE_TABLE_LEVEL)
                }
                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                               metaphysical, access,
-                                              shadow_ent, &new_page);
-               if (new_page && !metaphysical) {
+                                              shadow_ent);
+               if (!metaphysical) {
                        int r;
                        pt_element_t curr_pte;
                        r = kvm_read_guest_atomic(vcpu->kvm,
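
The fetch path is the one caller where behavior actually changes: the guard used to be "new_page && !metaphysical", so the guest pte was re-read only when a shadow page had just been allocated; with the guard reduced to "!metaphysical", the atomic re-read runs on every walk through a shadowed page table, including cache hits. Presumably the extra kvm_read_guest_atomic() on the hit path is cheap enough to pay for the simpler interface.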