}
 }
 
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2,
+                                     cpumask_t mask)
+{
+       /* XXX implement me */
+}
+
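
(A review aid, not part of the patch: the stub above is expected to grow
into something like the sketch below. It assumes the sun4v_cpu_mondo_send()
wrapper and HV_EOK error code that the hypervisor support patches will
introduce for the CPU_MONDO_SEND fast trap, and it glosses over the buffer
placement and HV_EWOULDBLOCK retry rules the UltraSPARC virtual machine
spec imposes.)

	static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2,
					     cpumask_t mask)
	{
		/* Sketch only: the real buffers belong in a properly
		 * aligned per-cpu area, not on the kernel stack.
		 */
		u64 mondo[4] __attribute__((aligned(64)));
		u16 cpu_list[NR_CPUS];
		unsigned long hv_err;
		int cnt = 0, i;

		/* Same (data0, data1, data2) payload the spitfire and
		 * cheetah delivery paths send.
		 */
		mondo[0] = data0;
		mondo[1] = data1;
		mondo[2] = data2;
		mondo[3] = 0;

		for_each_cpu_mask(i, mask)
			cpu_list[cnt++] = i;

		hv_err = sun4v_cpu_mondo_send(cnt, __pa(cpu_list),
					      __pa(mondo));
		if (hv_err != HV_EOK)
			printk(KERN_ERR "hypervisor_xcall_deliver: "
			       "hv error %lu\n", hv_err);
	}
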
 /* Send cross call to all processors mentioned in MASK
  * except self.
  */
 
        if (tlb_type == spitfire)
                spitfire_xcall_deliver(data0, data1, data2, mask);
-       else
+       else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                cheetah_xcall_deliver(data0, data1, data2, mask);
+       else
+               hypervisor_xcall_deliver(data0, data1, data2, mask);
        /* NOTE: Caller runs local copy on master. */
 
        put_cpu();
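
The explicit cheetah/cheetah_plus test above is the real point of this
hunk: with the old two-way if/else, a sun4v guest would fall through into
cheetah_xcall_deliver() and try to write the interrupt dispatch registers,
which are hyperprivileged on sun4v. Callers are untouched and still enter
through smp_cross_call()/smp_cross_call_masked(); e.g. the existing TLB
shootdown path (unchanged by this patch):

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);
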
 void smp_flush_dcache_page_impl(struct page *page, int cpu)
 {
        cpumask_t mask = cpumask_of_cpu(cpu);
-       int this_cpu = get_cpu();
+       int this_cpu;
+
+       if (tlb_type == hypervisor)
+               return;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
 #endif
+
+       this_cpu = get_cpu();
+
        if (cpu == this_cpu) {
                __local_flush_dcache_page(page);
        } else if (cpu_online(cpu)) {
                                               __pa(pg_addr),
                                               (u64) pg_addr,
                                               mask);
-               } else {
+               } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
                        data0 =
                                ((u64)&xcall_flush_dcache_page_cheetah);
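
Both D-cache flush paths get the same treatment: bail out immediately when
tlb_type == hypervisor (here and in flush_dcache_page_all() below), since
these cross calls exist to cure sun4u D-cache aliasing that sun4v CPUs do
not suffer. Doing the check before get_cpu() means the early return never
touches the preemption count; note that it also skips the
CONFIG_DEBUG_DCFLUSH counters on sun4v.
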
        void *pg_addr = page_address(page);
        cpumask_t mask = cpu_online_map;
        u64 data0;
-       int this_cpu = get_cpu();
+       int this_cpu;
+
+       if (tlb_type == hypervisor)
+               return;
+
+       this_cpu = get_cpu();
 
        cpu_clear(this_cpu, mask);
 
                                       __pa(pg_addr),
                                       (u64) pg_addr,
                                       mask);
-       } else {
+       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
                data0 = ((u64)&xcall_flush_dcache_page_cheetah);
                cheetah_xcall_deliver(data0,
 
                if (tlb_type == spitfire)
                        spitfire_xcall_deliver(data0, 0, 0, mask);
-               else
+               else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                        cheetah_xcall_deliver(data0, 0, 0, mask);
+               else if (tlb_type == hypervisor)
+                       hypervisor_xcall_deliver(data0, 0, 0, mask);
        }
 }
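
The same three-way tlb_type dispatch is now open-coded at more than one
xcall site. If further sites appear, it could be folded into one helper
along these lines (xcall_deliver() is a name invented here, not something
this patch adds):

	static void xcall_deliver(u64 data0, u64 data1, u64 data2,
				  cpumask_t mask)
	{
		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, data1, data2, mask);
		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cheetah_xcall_deliver(data0, data1, data2, mask);
		else if (tlb_type == hypervisor)
			hypervisor_xcall_deliver(data0, data1, data2, mask);
		else
			BUG();
	}
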
 
 
 
 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 {
-       /* Cheetah has coherent I-cache. */
+       /* Cheetah and hypervisor (sun4v) platform CPUs have a coherent I-cache. */
        if (tlb_type == spitfire) {
                unsigned long kaddr;
 
                seq_printf(m, "MMU Type\t: Cheetah+\n");
        else if (tlb_type == spitfire)
                seq_printf(m, "MMU Type\t: Spitfire\n");
+       else if (tlb_type == hypervisor)
+               seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
        else
                seq_printf(m, "MMU Type\t: ???\n");
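
With this, /proc/cpuinfo on a sun4v guest reports:

	MMU Type	: Hypervisor (sun4v)
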
 
                        if (++n >= 512)
                                break;
                }
-       } else {
+       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                start = __pa(start);
                end = __pa(end);
                for (va = start; va < end; va += 32)