DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
        DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
        DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
-       DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
+       DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
        DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
 
        DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
 
        cmpdi   0,r5,0
        beq     4f
        /* Check for pending interrupts (iSeries) */
-       ld      r3,PACALPPACA+LPPACAANYINT(r13)
+       ld      r3,PACALPPACAPTR(r13)
+       ld      r3,LPPACAANYINT(r3)
        cmpdi   r3,0
        beq+    4f                      /* skip do_IRQ if no interrupts */
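
A minimal C analogue (illustrative only, with stand-in types) of what the assembly conversion above amounts to: with the lppaca embedded in the paca, a field was one load at a fused constant offset (PACALPPACA+LPPACAANYINT); with only a pointer kept in the paca, the pointer must be loaded first and the field loaded through it.

/* Stand-in types, not the real kernel definitions. */
#include <stdio.h>

struct lppaca_demo { unsigned long any_int; };

struct paca_old { struct lppaca_demo lppaca; };       /* field at a fixed offset from the paca base */
struct paca_new { struct lppaca_demo *lppaca_ptr; };  /* needs a pointer load first */

static unsigned long any_int_old(struct paca_old *p)
{
	return p->lppaca.any_int;       /* one load:  ld r3,PACALPPACA+LPPACAANYINT(r13) */
}

static unsigned long any_int_new(struct paca_new *p)
{
	return p->lppaca_ptr->any_int;  /* two loads: ld r3,PACALPPACAPTR(r13); ld r3,LPPACAANYINT(r3) */
}

int main(void)
{
	struct lppaca_demo l = { .any_int = 1 };
	struct paca_old o = { .lppaca = l };
	struct paca_new n = { .lppaca_ptr = &l };

	printf("%lu %lu\n", any_int_old(&o), any_int_new(&n));
	return 0;
}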
 
 
 
 #define EXCEPTION_PROLOG_ISERIES_2                                     \
        mfmsr   r10;                                                    \
-       ld      r11,PACALPPACA+LPPACASRR0(r13);                         \
-       ld      r12,PACALPPACA+LPPACASRR1(r13);                         \
+       ld      r12,PACALPPACAPTR(r13);                                 \
+       ld      r11,LPPACASRR0(r12);                                    \
+       ld      r12,LPPACASRR1(r12);                                    \
        ori     r10,r10,MSR_RI;                                         \
        mtmsrd  r10,1
 
        std     r12,PACA_EXSLB+EX_R12(r13)
        mfspr   r10,SPRN_SPRG1
        std     r10,PACA_EXSLB+EX_R13(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13);
+       ld      r12,PACALPPACAPTR(r13)
+       ld      r12,LPPACASRR1(r12)
        b       .slb_miss_realmode
 
        STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
        mtspr   SPRN_SPRG1,r13          /* save r13 */
        mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
        std     r3,PACA_EXSLB+EX_R3(r13)
-       ld      r3,PACALPPACA+LPPACASRR0(r13)   /* get SRR0 value */
+       ld      r3,PACALPPACAPTR(r13)
+       ld      r3,LPPACASRR0(r3)       /* get SRR0 value */
        std     r9,PACA_EXSLB+EX_R9(r13)
        mfcr    r9
 #ifdef __DISABLED__
        std     r12,PACA_EXSLB+EX_R12(r13)
        mfspr   r10,SPRN_SPRG1
        std     r10,PACA_EXSLB+EX_R13(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13);
+       ld      r12,PACALPPACAPTR(r13)
+       ld      r12,LPPACASRR1(r12)
        b       .slb_miss_realmode
 
 #ifdef __DISABLED__
        .globl decrementer_iSeries_masked
 decrementer_iSeries_masked:
        li      r11,1
-       stb     r11,PACALPPACA+LPPACADECRINT(r13)
+       ld      r12,PACALPPACAPTR(r13)
+       stb     r11,LPPACADECRINT(r12)
        LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy)
        lwz     r12,ADDROFF(tb_ticks_per_jiffy)(r12)
        mtspr   SPRN_DEC,r12
        .globl hardware_interrupt_iSeries_masked
 hardware_interrupt_iSeries_masked:
        mtcrf   0x80,r9         /* Restore regs */
-       ld      r11,PACALPPACA+LPPACASRR0(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13)
+       ld      r12,PACALPPACAPTR(r13)
+       ld      r11,LPPACASRR0(r12)
+       ld      r12,LPPACASRR1(r12)
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        ld      r9,PACA_EXGEN+EX_R9(r13)
        ld      r3,PACA_EXSLB+EX_R3(r13)
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
 #ifdef CONFIG_PPC_ISERIES
-       ld      r11,PACALPPACA+LPPACASRR0(r13)  /* get SRR0 value */
+       ld      r11,PACALPPACAPTR(r13)
+       ld      r11,LPPACASRR0(r11)             /* get SRR0 value */
 #endif /* CONFIG_PPC_ISERIES */
 
        mtlr    r10
 
         irq_exit();
 
 #ifdef CONFIG_PPC_ISERIES
-       {
-               struct paca_struct *lpaca = get_paca();
-
-               if (lpaca->lppaca.int_dword.fields.decr_int) {
-                       lpaca->lppaca.int_dword.fields.decr_int = 0;
-                       /* Signal a fake decrementer interrupt */
-                       timer_interrupt(regs);
-               }
+       if (get_lppaca()->int_dword.fields.decr_int) {
+               get_lppaca()->int_dword.fields.decr_int = 0;
+               /* Signal a fake decrementer interrupt */
+               timer_interrupt(regs);
        }
 #endif
 }
 
 {
        unsigned long sum_purr = 0;
        int cpu;
-       struct paca_struct *lpaca;
 
        for_each_cpu(cpu) {
-               lpaca = paca + cpu;
-               sum_purr += lpaca->lppaca.emulated_time_base;
+               sum_purr += lppaca[cpu].emulated_time_base;
 
 #ifdef PURR_DEBUG
                printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n",
-                       cpu, lpaca->lppaca.emulated_time_base);
+                       cpu, lppaca[cpu].emulated_time_base);
 #endif
        }
        return sum_purr;
        unsigned long pool_id, lp_index;
        int shared, entitled_capacity, max_entitled_capacity;
        int processors, max_processors;
-       struct paca_struct *lpaca = get_paca();
        unsigned long purr = get_purr();
 
        seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
 
-       shared = (int)(lpaca->lppaca_ptr->shared_proc);
+       shared = (int)(get_lppaca()->shared_proc);
        seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n",
                   e2a(xItExtVpdPanel.mfgID[2]),
                   e2a(xItExtVpdPanel.mfgID[3]),
                           (h_resource >> 0 * 8) & 0xffff);
 
                /* pool related entries are appropriate for shared configs */
-               if (paca[0].lppaca.shared_proc) {
+               if (lppaca[0].shared_proc) {
 
                        h_pic(&pool_idle_time, &pool_procs);
 
        seq_printf(m, "partition_potential_processors=%d\n",
                   partition_potential_processors);
 
-       seq_printf(m, "shared_processor_mode=%d\n", paca[0].lppaca.shared_proc);
+       seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
 
        return 0;
 }
 
  * field correctly */
 extern unsigned long __toc_start;
 
+/*
+ * iSeries structure which the hypervisor knows about - this structure
+ * should not cross a page boundary.  The vpa_init/register_vpa call
+ * is now known to fail if the lppaca structure crosses a page
+ * boundary.  The lppaca is also used on POWER5 pSeries boxes.  The
+ * lppaca is 640 bytes long, and cannot readily change since the
+ * hypervisor knows its layout, so a 1kB alignment will suffice to
+ * ensure that it doesn't cross a page boundary.
+ */
+struct lppaca lppaca[] = {
+       [0 ... (NR_CPUS-1)] = {
+               .desc = 0xd397d781,     /* "LpPa" */
+               .size = sizeof(struct lppaca),
+               .dyn_proc_status = 2,
+               .decr_val = 0x00ff0000,
+               .fpregs_in_use = 1,
+               .end_of_quantum = 0xfffffffffffffffful,
+               .slb_count = 64,
+               .vmxregs_in_use = 0,
+       },
+};
+
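The [0 ... (NR_CPUS-1)] form above is GCC's range-designator extension: one initializer covers the whole index range, so every lppaca entry gets the same defaults. A stand-alone illustration (hypothetical names, illustrative only):

#include <stdio.h>

#define NR_ITEMS 4

struct item_demo { int dyn_proc_status; };

/* GCC extension: a single initializer applied to indices 0..NR_ITEMS-1 */
static struct item_demo items[] = {
	[0 ... (NR_ITEMS - 1)] = { .dyn_proc_status = 2 },
};

int main(void)
{
	printf("%d\n", items[NR_ITEMS - 1].dyn_proc_status);  /* prints 2 */
	return 0;
}
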
-/* The Paca is an array with one entry per processor.  Each contains an
- * lppaca, which contains the information shared between the
- * hypervisor and Linux.
+/* The Paca is an array with one entry per processor.  Each contains a
+ * pointer to its lppaca, the structure shared between the hypervisor
+ * and Linux.
  * processor (not thread).
  */
 #define PACA_INIT_COMMON(number, start, asrr, asrv)                        \
+       .lppaca_ptr = &lppaca[number],                                      \
        .lock_token = 0x8000,                                               \
        .paca_index = (number),         /* Paca Index */                    \
        .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL,             \
        .stab_real = (asrr),            /* Real pointer to segment table */ \
        .stab_addr = (asrv),            /* Virt pointer to segment table */ \
        .cpu_start = (start),           /* Processor start */               \
-       .hw_cpu_id = 0xffff,                                                \
-       .lppaca = {                                                         \
-               .desc = 0xd397d781,     /* "LpPa" */                        \
-               .size = sizeof(struct lppaca),                              \
-               .dyn_proc_status = 2,                                       \
-               .decr_val = 0x00ff0000,                                     \
-               .fpregs_in_use = 1,                                         \
-               .end_of_quantum = 0xfffffffffffffffful,                     \
-               .slb_count = 64,                                            \
-               .vmxregs_in_use = 0,                                        \
-       },                                                                  \
+       .hw_cpu_id = 0xffff,
 
 #ifdef CONFIG_PPC_ISERIES
 #define PACA_INIT_ISERIES(number)                                          \
-       .lppaca_ptr = &paca[number].lppaca,                                 \
        .reg_save_ptr = &iseries_reg_save[number],
 
 #define PACA_INIT(number)                                                  \
 
        profile_tick(CPU_PROFILING, regs);
 
 #ifdef CONFIG_PPC_ISERIES
-       get_paca()->lppaca.int_dword.fields.decr_int = 0;
+       get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
        while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
 
 void __spin_yield(raw_spinlock_t *lock)
 {
        unsigned int lock_value, holder_cpu, yield_count;
-       struct paca_struct *holder_paca;
 
        lock_value = lock->slock;
        if (lock_value == 0)
                return;
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       holder_paca = &paca[holder_cpu];
-       yield_count = holder_paca->lppaca.yield_count;
+       yield_count = lppaca[holder_cpu].yield_count;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
 {
        int lock_value;
        unsigned int holder_cpu, yield_count;
-       struct paca_struct *holder_paca;
 
        lock_value = rw->lock;
        if (lock_value >= 0)
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       holder_paca = &paca[holder_cpu];
-       yield_count = holder_paca->lppaca.yield_count;
+       yield_count = lppaca[holder_cpu].yield_count;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
 
  */
 int iSeries_get_irq(struct pt_regs *regs)
 {
-       struct paca_struct *lpaca;
        /* -2 means ignore this interrupt */
        int irq = -2;
 
-       lpaca = get_paca();
 #ifdef CONFIG_SMP
-       if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
-               lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
+       if (get_lppaca()->int_dword.fields.ipi_cnt) {
+               get_lppaca()->int_dword.fields.ipi_cnt = 0;
                iSeries_smp_message_recv(regs);
        }
 #endif /* CONFIG_SMP */
 
        /* Check pending interrupts */
        /*   A decrementer, IPI or PMC interrupt may have occurred
         *   while we were in the hypervisor (which enables) */
-       ld      r4,PACALPPACA+LPPACAANYINT(r13)
+       ld      r4,PACALPPACAPTR(r13)
+       ld      r4,LPPACAANYINT(r4)
        cmpdi   r4,0
        beqlr
 
 
  */
 static void __init iSeries_setup_arch(void)
 {
-       if (get_paca()->lppaca.shared_proc) {
+       if (get_lppaca()->shared_proc) {
                ppc_md.idle_loop = iseries_shared_idle;
                printk(KERN_INFO "Using shared processor idle loop\n");
        } else {
         * The decrementer stops during the yield.  Force a fake decrementer
         * here and let the timer_interrupt code sort out the actual time.
         */
-       get_paca()->lppaca.int_dword.fields.decr_int = 1;
+       get_lppaca()->int_dword.fields.decr_int = 1;
        process_iSeries_events();
 }
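
Together with the irq_exit() hunk earlier, this forms a small handshake over the lppaca decr_int bit: the shared-processor yield path requests a fake decrementer, and the interrupt-exit path consumes the flag and delivers the tick. A simplified stand-alone sketch (kernel internals stubbed out, names hypothetical):

#include <stdio.h>

static struct { int decr_int; } lppaca_demo;

static void timer_interrupt_demo(void) { printf("fake decrementer delivered\n"); }

/* Yield side: the decrementer stopped while we were yielded, so ask
 * for a fake tick (cf. get_lppaca()->int_dword.fields.decr_int = 1). */
static void after_yield(void)
{
	lppaca_demo.decr_int = 1;
}

/* Interrupt-exit side: consume the flag and deliver the fake tick
 * (cf. the CONFIG_PPC_ISERIES block in the irq_exit() hunk). */
static void on_irq_exit(void)
{
	if (lppaca_demo.decr_int) {
		lppaca_demo.decr_int = 0;
		timer_interrupt_demo();
	}
}

int main(void)
{
	after_yield();
	on_irq_exit();
	return 0;
}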
 
        pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
        for (i = 0; i < NR_CPUS; i++) {
-               if (paca[i].lppaca.dyn_proc_status >= 2)
+               if (lppaca[i].dyn_proc_status >= 2)
                        continue;
 
                snprintf(p, 32 - (p - buf), "@%d", i);
 
                dt_prop_str(dt, "device_type", "cpu");
 
-               index = paca[i].lppaca.dyn_hv_phys_proc_index;
+               index = lppaca[i].dyn_hv_phys_proc_index;
                d = &xIoHriProcessorVpd[index];
 
                dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
 
        BUG_ON((nr < 0) || (nr >= NR_CPUS));
 
        /* Verify that our partition has a processor nr */
-       if (paca[nr].lppaca.dyn_proc_status >= 2)
+       if (lppaca[nr].dyn_proc_status >= 2)
                return;
 
        /* The processor is currently spinning, waiting
 
 void vpa_init(int cpu)
 {
        int hwcpu = get_hard_smp_processor_id(cpu);
-       unsigned long vpa = __pa(&paca[cpu].lppaca);
+       unsigned long vpa = __pa(&lppaca[cpu]);
        long ret;
 
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
-               paca[cpu].lppaca.vmxregs_in_use = 1;
+               lppaca[cpu].vmxregs_in_use = 1;
 
        ret = register_vpa(hwcpu, vpa);
 
 
 
        /* instruct hypervisor to maintain PMCs */
        if (firmware_has_feature(FW_FEATURE_SPLPAR))
-               get_paca()->lppaca.pmcregs_in_use = 1;
+               get_lppaca()->pmcregs_in_use = 1;
 }
 
 static void __init pSeries_setup_arch(void)
        /* Choose an idle loop */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                vpa_init(boot_cpuid);
-               if (get_paca()->lppaca.shared_proc) {
+               if (get_lppaca()->shared_proc) {
                        printk(KERN_INFO "Using shared processor idle loop\n");
                        ppc_md.idle_loop = pseries_shared_idle;
                } else {
 
 static inline void dedicated_idle_sleep(unsigned int cpu)
 {
-       struct paca_struct *ppaca = &paca[cpu ^ 1];
+       struct lppaca *plppaca = &lppaca[cpu ^ 1];
 
        /* Only sleep if the other thread is not idle */
-       if (!(ppaca->lppaca.idle)) {
+       if (!(plppaca->idle)) {
                local_irq_disable();
 
                /*
 
 static void pseries_dedicated_idle(void)
 { 
-       struct paca_struct *lpaca = get_paca();
        unsigned int cpu = smp_processor_id();
        unsigned long start_snooze;
        unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
                 * Indicate to the HV that we are idle. Now would be
                 * a good time to find other work to dispatch.
                 */
-               lpaca->lppaca.idle = 1;
+               get_lppaca()->idle = 1;
 
                if (!need_resched()) {
                        start_snooze = get_tb() +
                        HMT_medium();
                }
 
-               lpaca->lppaca.idle = 0;
+               get_lppaca()->idle = 0;
                ppc64_runlatch_on();
 
                preempt_enable_no_resched();
 
 static void pseries_shared_idle(void)
 {
-       struct paca_struct *lpaca = get_paca();
        unsigned int cpu = smp_processor_id();
 
        while (1) {
                 * Indicate to the HV that we are idle. Now would be
                 * a good time to find other work to dispatch.
                 */
-               lpaca->lppaca.idle = 1;
+               get_lppaca()->idle = 1;
 
                while (!need_resched() && !cpu_is_offline(cpu)) {
                        local_irq_disable();
                        HMT_medium();
                }
 
-               lpaca->lppaca.idle = 0;
+               get_lppaca()->idle = 0;
                ppc64_runlatch_on();
 
                preempt_enable_no_resched();
 {
        /* Don't risk a hypervisor call if we're crashing */
        if (!crash_shutdown) {
-               unsigned long vpa = __pa(&get_paca()->lppaca);
+               unsigned long vpa = __pa(get_lppaca());
 
                if (unregister_vpa(hard_smp_processor_id(), vpa)) {
                        printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
 
 //----------------------------------------------------------------------------
 #include <asm/types.h>
 
-struct lppaca {
+/* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
+ * alignment is sufficient to prevent this */
+struct __attribute__((__aligned__(0x400))) lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
        u8      pmc_save_area[256];     // PMC interrupt Area           x00-xFF
 };
 
+extern struct lppaca lppaca[];
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
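
Because the alignment attribute is attached to the type, GCC also rounds sizeof(struct lppaca) up to a multiple of the 1kB alignment, so consecutive lppaca[] elements sit exactly 1kB apart, each starting on a 1kB boundary; since 1kB divides the page size, no element can straddle a page. A stand-alone demonstration with a stand-in struct (illustrative only):

#include <stdio.h>

/* Stand-in for the aligned lppaca: 640 bytes of payload, 1kB-aligned type */
struct __attribute__((__aligned__(0x400))) demo {
	unsigned char payload[640];
};

int main(void)
{
	struct demo arr[2];

	/* sizeof is padded up to the alignment, so the array stride is 1kB
	 * and every element begins on a 1kB boundary. */
	printf("sizeof = %zu, stride = %td\n",
	       sizeof(struct demo),
	       (char *)&arr[1] - (char *)&arr[0]);   /* both print 1024 */
	return 0;
}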
 
 
 register struct paca_struct *local_paca asm("r13");
 #define get_paca()     local_paca
+#define get_lppaca()   (get_paca()->lppaca_ptr)
 
 struct task_struct;
 
        u64 saved_r1;                   /* r1 save for RTAS calls */
        u64 saved_msr;                  /* MSR saved here by enter_rtas */
        u8 proc_enabled;                /* irq soft-enable flag */
-
-       /*
-        * iSeries structure which the hypervisor knows about -
-        * this structure should not cross a page boundary.
-        * The vpa_init/register_vpa call is now known to fail if the
-        * lppaca structure crosses a page boundary.
-        * The lppaca is also used on POWER5 pSeries boxes.
-        * The lppaca is 640 bytes long, and cannot readily change
-        * since the hypervisor knows its layout, so a 1kB
-        * alignment will suffice to ensure that it doesn't
-        * cross a page boundary.
-        */
-       struct lppaca lppaca __attribute__((__aligned__(0x400)));
 };
 
 extern struct paca_struct paca[];
 
 
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
 extern void __spin_yield(raw_spinlock_t *lock);
 extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 
        set_dec_cpu6(val);
 #else
 #ifdef CONFIG_PPC_ISERIES
-       struct paca_struct *lpaca = get_paca();
        int cur_dec;
 
-       if (lpaca->lppaca.shared_proc) {
-               lpaca->lppaca.virtual_decr = val;
+       if (get_lppaca()->shared_proc) {
+               get_lppaca()->virtual_decr = val;
                cur_dec = get_dec();
                if (cur_dec > val)
                        HvCall_setVirtualDecr();