#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
+#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "cpu.h"
+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
+
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
int cpu = smp_processor_id();
struct tss_struct * t = &per_cpu(init_tss, cpu);
struct thread_struct *thread = &current->thread;
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ struct desc_struct *gdt;
__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
if (cpu_test_and_set(cpu, cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
for (;;) local_irq_enable();
}
+ /*
+ * This is a horrible hack to allocate the GDT. The problem
+ * is that cpu_init() is called really early for the boot CPU
+ * (and hence needs bootmem) but much later for the secondary
+ * CPUs, when bootmem will have gone away
+ */
+ if (NODE_DATA(0)->bdata->node_bootmem_map) {
+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
+ /* alloc_bootmem_pages panics on failure, so no check */
+ memset(gdt, 0, PAGE_SIZE);
+ } else {
+ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+ if (unlikely(!gdt)) {
+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
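+ /* Cannot continue without a GDT; park this CPU, interrupts on */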
+ for (;;)
+ local_irq_enable();
+ }
+ }
+
/*
 * Initialize the per-CPU GDT with the boot GDT,
 * and set up the GDT descriptor:
 */
memcpy(gdt, cpu_gdt_table, GDT_SIZE);

/* Set up GDT entry for 16bit stack: splice the stack's base address
   into the descriptor's split base fields */
*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
(CPU_16BIT_STACK_SIZE - 1);
- cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
- cpu_gdt_descr[cpu].address = (unsigned long)gdt;
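+ /* Point this CPU's GDT descriptor at the new table before loading it */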
+ cpu_gdt_descr->size = GDT_SIZE - 1;
+ cpu_gdt_descr->address = (unsigned long)gdt;
- load_gdt(&cpu_gdt_descr[cpu]);
+ load_gdt(cpu_gdt_descr);
load_idt(&idt_descr);
/*
 * After the lock is released, the original page table is restored.
 */
local_flush_tlb();
- cpu_gdt_descr[0].address = __pa(cpu_gdt_descr[0].address);
- load_gdt((struct Xgt_desc_struct *) __pa(&cpu_gdt_descr[0]));
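+ /*
+  * The firmware call executes through the 1:1 alias set up above, so
+  * the GDT base, and the descriptor pointer given to lgdt, must both
+  * be physical addresses
+  */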
+ per_cpu(cpu_gdt_descr, 0).address =
+ __pa(per_cpu(cpu_gdt_descr, 0).address);
+ load_gdt((struct Xgt_desc_struct *)__pa(&per_cpu(cpu_gdt_descr, 0)));
}
static void efi_call_phys_epilog(void)
{
unsigned long cr4;
- cpu_gdt_descr[0].address =
- (unsigned long) __va(cpu_gdt_descr[0].address);
- load_gdt(&cpu_gdt_descr[0]);
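+ /*
+  * Back under the kernel page tables: convert the GDT base back to
+  * its virtual address and reload the descriptor
+  */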
+ per_cpu(cpu_gdt_descr, 0).address =
+ (unsigned long)__va(per_cpu(cpu_gdt_descr, 0).address);
+ load_gdt(&per_cpu(cpu_gdt_descr, 0));
+
cr4 = read_cr4();
if (cr4 & X86_CR4_PSE) {
unsigned short pad;
} __attribute__ ((packed));
-extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
+extern struct Xgt_desc_struct idt_descr;
+DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+
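+/* Fetch @cpu's GDT base (a virtual address) from its per-CPU descriptor */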
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
- return ((struct desc_struct *)cpu_gdt_descr[cpu].address);
+ return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
}
#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))