#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)

#ifdef CONFIG_X86_64
#define CLBR_RSI  (1 << 3)
#define CLBR_RDI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)
#define CLBR_ANY  ((1 << 9) - 1)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all the registers the platform has: for i386,
 * that is just the three above. */
#define CLBR_ANY  ((1 << 3) - 1)
#endif /* X86_64 */
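
/*
 * Worked example (illustrative): a patch site whose clobber mask is
 * (CLBR_EAX | CLBR_EDX) lets the patched-in code trash %eax and %edx but
 * nothing else, while CLBR_ANY places no restriction; a patcher can test a
 * single register with a bitwise AND, e.g. (clobbers & CLBR_ECX) != 0.
 */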
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
};
struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop-pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};
struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_tsc_khz)(void);
};
struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
        unsigned long (*read_cr8)(void);
        void (*write_cr8)(unsigned long);
#endif
        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
        void (*load_gs_index)(unsigned int idx);
#endif
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
        void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);
        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);
        /*
         * Atomically enable interrupts and return to userspace.  This
         * is only ever used to return to 32-bit processes; in a
         * 64-bit kernel, it's used for 32-on-64 compat processes, but
         * never native 64-bit processes.  (Jump, not call.)
         */
        void (*irq_enable_sysexit)(void);

        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret.  Only used in 64-bit kernels to return to 64-bit
         * processes.  Usermode register state, including %rsp, must
         * already be restored.
         */
        void (*usergs_sysret64)(void);

        /*
         * Switch to usermode gs and return to 32-bit usermode using
         * sysret.  Used to return to 32-on-64 compat processes.
         * Other usermode register state, including %esp, must already
         * be restored.
         */
        void (*usergs_sysret32)(void);

        /* Normal iret.  Jump to this with the standard iret stack
           frame set up. */
        void (*iret)(void);

        void (*swapgs)(void);

        struct pv_lazy_ops lazy_mode;
};
struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);

#ifdef CONFIG_X86_64
        void (*adjust_exception_frame)(void);
#endif
};
struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI.  Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, u32 v);
        u32 (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};
struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping or memory allocations.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);
        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating and freeing a pagetable top-level */
        int  (*pgd_alloc)(struct mm_struct *mm);
        void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

        /*
         * Hooks for allocating/releasing pagetable pages when they're
         * attached to a pagetable
         */
        void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
        void (*release_pte)(u32 pfn);
        void (*release_pmd)(u32 pfn);
        void (*release_pud)(u32 pfn);
        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

        pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep);
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);

        pteval_t (*pte_val)(pte_t);
        pteval_t (*pte_flags)(pte_t);
        pte_t (*make_pte)(pteval_t pte);

        pgdval_t (*pgd_val)(pgd_t);
        pgd_t (*make_pgd)(pgdval_t pgd);
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

#endif  /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        pmdval_t (*pmd_val)(pmd_t);
        pmd_t (*make_pmd)(pmdval_t pmd);

#if PAGETABLE_LEVELS == 4
        pudval_t (*pud_val)(pud_t);
        pud_t (*make_pud)(pudval_t pud);

        void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif  /* PAGETABLE_LEVELS == 4 */
#endif  /* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;

        /* Sometimes the physical address is a pfn, and sometimes it's
           an mfn.  We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           unsigned long phys, pgprot_t flags);
};
struct pv_lock_ops {
        int (*spin_is_locked)(struct raw_spinlock *lock);
        int (*spin_is_contended)(struct raw_spinlock *lock);
        void (*spin_lock)(struct raw_spinlock *lock);
        int (*spin_trylock)(struct raw_spinlock *lock);
        void (*spin_unlock)(struct raw_spinlock *lock);
};
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
        struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;
#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
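
/*
 * Worked example (illustrative): type numbers are dense pointer-slot
 * indices into the template above, so
 *
 *	PARAVIRT_PATCH(pv_init_ops.patch) == 0
 *	PARAVIRT_PATCH(pv_time_ops.time_init)
 *		== sizeof(struct pv_init_ops) / sizeof(void *)
 *
 * and the patcher can map a type number back to a function pointer by
 * building a paravirt_patch_template and indexing it as
 * *((void **)&tmpl + type).
 */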
#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	"  .byte " type "\n"				\
	"  .byte 772b-771b\n"				\
	"  .short " clobber "\n"			\
	".popsection\n"
/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[]; \
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
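
/*
 * Usage sketch: the native patch tables capture raw instruction bytes with
 * DEF_NATIVE, e.g. (as the kernel's native patching code does)
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *
 * giving start_pv_irq_ops_irq_disable[]/end_pv_irq_ops_irq_disable[]
 * bracketing a lone "cli", ready to be copied by paravirt_patch_insns()
 * (declared below).
 */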
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);
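
/*
 * A minimal sketch (not part of this interface; "example_patch" is a
 * hypothetical name): a backend pv_init_ops.patch hook that inlines
 * nothing itself and defers every site to the common default patcher.
 */
static inline unsigned example_patch(u8 type, u16 clobbers, void *insnbuf,
				     unsigned long addr, unsigned len)
{
	/* A real backend would emit inline code for hot ops here. */
	return paravirt_patch_default(type, clobbers, insnbuf, addr, len);
}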
/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%[paravirt_opptr];"
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax, edx, ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.  However,
 * x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
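
/*
 * Worked example (i386, illustrative): PVOP_VCALL2(pv_mmu_ops.pgd_free, mm,
 * pgd) ties mm to constraint "0" (%eax) and pgd to "1" (%edx), emits the
 * indirect call between the 771:/772: labels, and records the site in
 * .parainstructions so the patcher may later replace it with a direct call
 * or inline the operation entirely.
 */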
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS			unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS		unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif
#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})
#define __PVOP_VCALL(op, pre, post, ...)				\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : PVOP_VCALL_CLOBBERS			\
			     : paravirt_type(op),			\
			       paravirt_clobber(CLBR_ANY),		\
			       ##__VA_ARGS__				\
			     : "memory", "cc" VEXTRA_CLOBBERS);		\
	})
#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))
/* The 4-argument case is the only place i386 and x86_64 differ: on x86_64
 * all four arguments fit in registers, so no stack push is needed. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
		    "3" ((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
		     "3" ((unsigned long)(arg4)))
#endif
static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP			pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
	return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
	return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
	return pv_time_ops.time_init;
}
/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
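
/*
 * Usage sketch (hypothetical helper): query a CPUID leaf through the hook,
 * which gives a hypervisor the chance to mask feature bits out.
 */
static inline unsigned int example_cpuid_edx(unsigned int leaf)
{
	unsigned int eax = leaf, ebx = 0, ecx = 0, edx = 0;

	__cpuid(&eax, &ebx, &ecx, &edx);
	return edx;	/* e.g. leaf 1: standard feature flags */
}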
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif
static inline void raw_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}
static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}
static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}
#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}
/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})
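
/*
 * Usage sketch (hypothetical helper): unlike rdmsr(), which ignores the
 * fault indication, rdmsr_safe() returns 0/-EFAULT so callers can probe
 * MSRs that may not exist on this CPU or under this hypervisor.
 */
static inline int example_probe_msr(unsigned msr, u64 *val)
{
	u32 lo, hi;
	int err = rdmsr_safe(msr, &lo, &hi);

	if (!err)
		*val = ((u64)hi << 32) | lo;
	return err;
}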
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned int __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif
static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}
#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
	PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
	return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif
static inline void paravirt_post_allocator_init(void)
{
	if (pv_init_ops.post_allocator_init)
		(*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
					    unsigned start, unsigned count)
{
	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
	unsigned long ret;
	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
	return (void *)ret;
}
#endif
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t,
				 pv_mmu_ops.make_pte,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pteval_t,
				 pv_mmu_ops.make_pte,
				 val);

	return (pte_t) { .pte = ret };
}
static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte);

	return ret;
}
static inline pteval_t pte_flags(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte);

#ifdef CONFIG_PARAVIRT_DEBUG
	BUG_ON(ret & PTE_PFN_MASK);
#endif
	return ret;
}
static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
				 val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd);

	return ret;
}
#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}
#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
				 val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd);

	return ret;
}
static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
				 val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud);

	return ret;
}
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	/* 5 arg words */
	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}
}
#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}
}
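
/*
 * Usage sketch (hypothetical caller): batch a run of pagetable updates
 * under lazy MMU mode so a backend can coalesce them into one hypercall;
 * the enter/leave pairing is the part being illustrated.
 */
static inline void example_set_ptes(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int count)
{
	int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < count; i++)
		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, pte);
	arch_leave_lazy_mmu_mode();
}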
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				unsigned long phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
#define paravirt_nop	((void *)_paravirt_nop)
void paravirt_use_bytelocks(void);

#ifdef CONFIG_SMP

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}
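
/*
 * Usage sketch (hypothetical caller): these indirect through pv_lock_ops so
 * a hypervisor can substitute a yield-friendly lock; callers are unchanged.
 */
static inline void example_locked_increment(struct raw_spinlock *lock, int *v)
{
	__raw_spin_lock(lock);
	(*v)++;
	__raw_spin_unlock(lock);
}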
#endif

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
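
/*
 * Simplified sketch of how apply_paravirt() consumes these records: walk
 * the section and let the backend patch each site. (The real code patches
 * into a local buffer, nop-pads the remainder, then copies it back.)
 */
static inline void example_walk_parainstructions(void)
{
	struct paravirt_patch_site *p;

	for (p = __parainstructions; p < __parainstructions_end; p++)
		pv_init_ops.patch(p->instrtype, p->clobbers, p->instr,
				  (unsigned long)p->instr, p->len);
}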
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif
static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long f;

	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : paravirt_type(pv_irq_ops.save_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
	return f;
}
static inline void raw_local_irq_restore(unsigned long f)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : PV_FLAGS_ARG(f),
		       paravirt_type(pv_irq_ops.restore_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_EXTRA_CLOBBERS);
}
static inline void raw_local_irq_disable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_disable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_enable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long f;

	f = __raw_local_save_flags();
	raw_local_irq_disable();
	return f;
}
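
/*
 * Usage sketch (hypothetical caller): the classic save/disable ... restore
 * pattern these primitives support, normally reached via local_irq_save().
 */
static inline void example_critical_section(void)
{
	unsigned long flags = __raw_local_irq_save();

	/* interrupts are off here; touch per-cpu state safely */
	raw_local_irq_restore(flags);
}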
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */
#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection
#ifdef CONFIG_X86_64
#define PV_SAVE_REGS				\
	push %rax;				\
	push %rcx;				\
	push %rdx;				\
	push %rsi;				\
	push %rdi;				\
	push %r8;				\
	push %r9;				\
	push %r10;				\
	push %r11
#define PV_RESTORE_REGS				\
	pop %r11;				\
	pop %r10;				\
	pop %r9;				\
	pop %r8;				\
	pop %rdi;				\
	pop %rsi;				\
	pop %rdx;				\
	pop %rcx;				\
	pop %rax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS;)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);		\
		  PV_RESTORE_REGS					\
		 )
#define GET_CR2_INTO_RCX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
	movq %rax, %rcx;				\
	xorq %rax, %rax;
#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif	/* __ASM_PARAVIRT_H */