        return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
-int kvm_read_guest(struct kvm_vcpu *vcpu,
-                            gva_t addr,
-                            unsigned long size,
-                            void *dest)
+int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
+                  void *dest)
 {
        unsigned char *host_buf = dest;
        unsigned long req_size = size;
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest);
 
-int kvm_write_guest(struct kvm_vcpu *vcpu,
-                            gva_t addr,
-                            unsigned long size,
-                            void *data)
+int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
+                   void *data)
 {
        unsigned char *host_buf = data;
        unsigned long req_size = size;
 void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
        if (is_long_mode(vcpu)) {
-               if ( cr3 & CR3_L_MODE_RESEVED_BITS) {
+               if (cr3 & CR3_L_MODE_RESEVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        inject_gp(vcpu);
                        return;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;
 
-
        if (any) {
                cleared = 0;
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                return X86EMUL_CONTINUE;
        else {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+
                if (gpa == UNMAPPED_GVA)
-                       return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT;
+                       return X86EMUL_PROPAGATE_FAULT;
                vcpu->mmio_needed = 1;
                vcpu->mmio_phys_addr = gpa;
                vcpu->mmio_size = bytes;
        case KVM_GET_API_VERSION:
                r = KVM_API_VERSION;
                break;
-       case KVM_CREATE_VCPU: {
+       case KVM_CREATE_VCPU:
                r = kvm_dev_ioctl_create_vcpu(kvm, arg);
                if (r)
                        goto out;
                break;
-       }
        case KVM_RUN: {
                struct kvm_run kvm_run;
 
 
 
                addr_mask = io_adress(vcpu, _in, &kvm_run->io.address);
                if (!addr_mask) {
-                       printk(KERN_DEBUG "%s: get io address failed\n", __FUNCTION__);
+                       printk(KERN_DEBUG "%s: get io address failed\n",
+                              __FUNCTION__);
                        return 1;
                }
 
                if (kvm_run->io.rep) {
-                       kvm_run->io.count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
+                       kvm_run->io.count
+                               = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
                        kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
                                                   & X86_EFLAGS_DF) != 0;
                }
-       } else {
+       } else
                kvm_run->io.value = vcpu->svm->vmcb->save.rax;
-       }
        return 0;
 }
 
-
 static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        return 1;
 
 
 #include "segment_descriptor.h"
 
-
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
        case MSR_IA32_SYSENTER_ESP:
                vmcs_write32(GUEST_SYSENTER_ESP, data);
                break;
-       case MSR_IA32_TIME_STAMP_COUNTER: {
+       case MSR_IA32_TIME_STAMP_COUNTER:
                guest_write_tsc(data);
                break;
-       }
        default:
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {