        ret = 0;
 
-       switch(msr) {
+       switch (msr) {
 #ifdef CONFIG_X86_64
                unsigned which;
                u64 base;
 
        ident_pte = 0;
        pfn = 0;
-       for(pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+       for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
                pte_t *pte_page;
 
                /* Reuse or allocate a page of ptes */
                }
 
                /* Install mappings */
-               for(pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+               for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
                        pte_t pte;
 
                        if (pfn > max_pfn_mapped)
                }
        }
 
-       for(pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+       for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
                set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
 
        set_page_prot(pmd, PAGE_KERNEL_RO);
 
        /* All levels are converted the same way, so just treat them
           as ptes. */
-       for(i = 0; i < PTRS_PER_PTE; i++)
+       for (i = 0; i < PTRS_PER_PTE; i++)
                pte[i] = xen_make_pte(pte[i].pte);
 }
 
  * of the physical mapping once some sort of allocator has been set
  * up.
  */
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+                                               unsigned long max_pfn)
 {
        pud_t *l3;
        pmd_t *l2;
 #else  /* !CONFIG_X86_64 */
 static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
 
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+                                               unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
 
 {
        unsigned pfn, idx;
 
-       for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
+       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
 
                p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
        }
 
-       for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
+       for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
                unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
                p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
        }
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
        unsigned pfn;
 
-       for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+       for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
 
                p2m_top[topidx] = &mfn_list[pfn];
        p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
        BUG_ON(p == NULL);
 
-       for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
+       for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
                p[i] = INVALID_P2M_ENTRY;
 
        if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
                preempt_enable();
 }
 
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
+                                unsigned long addr, pte_t *ptep)
 {
        /* Just return the pte as-is.  We preserve the bits on commit */
        return *ptep;
 
                if (user_pgd) {
                        xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
-                       xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
+                       xen_do_pin(MMUEXT_PIN_L4_TABLE,
+                                  PFN_DOWN(__pa(user_pgd)));
                }
        }
 #else /* CONFIG_X86_32 */
                pgd_t *user_pgd = xen_get_user_pgd(pgd);
 
                if (user_pgd) {
-                       xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
+                       xen_do_pin(MMUEXT_UNPIN_TABLE,
+                                  PFN_DOWN(__pa(user_pgd)));
                        xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
                }
        }
 
                               ret, smp_processor_id());
                        dump_stack();
                        for (i = 0; i < b->mcidx; i++) {
-                               printk("  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
+                               printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
                                       i+1, b->mcidx,
                                       b->debug[i].op,
                                       b->debug[i].args[0],
 
 /* These are code, but not functions.  Defined in entry.S */
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
+extern void xen_sysenter_target(void);
+extern void xen_syscall_target(void);
+extern void xen_syscall32_target(void);
 
 
 /**
 
 void __cpuinit xen_enable_sysenter(void)
 {
-       extern void xen_sysenter_target(void);
        int ret;
        unsigned sysenter_feature;
 
 {
 #ifdef CONFIG_X86_64
        int ret;
-       extern void xen_syscall_target(void);
-       extern void xen_syscall32_target(void);
 
        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
 
        if (!xen_feature(XENFEAT_auto_translated_physmap))
-               HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
+               HYPERVISOR_vm_assist(VMASST_CMD_enable,
+                                    VMASST_TYPE_pae_extended_cr3);
 
        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))