#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)

#ifdef CONFIG_X86_64
#define CLBR_RSI  (1 << 3)
#define CLBR_RDI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)
#define CLBR_ANY  ((1 << 9) - 1)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all the registers the platform has; for i386,
 * that's all of them. */
#define CLBR_ANY  ((1 << 3) - 1)
#endif /* X86_64 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};
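
/*
 * An illustrative sketch (not part of the interface) of a minimal
 * backend .patch implementation: handle nothing specially and defer
 * to the generic patcher, which emits a direct call/jmp where it can
 * and nop-pads the rest.  The name "my_patch" is hypothetical.
 *
 *	static unsigned my_patch(u8 type, u16 clobbers, void *insnbuf,
 *				 unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobbers, insnbuf,
 *					      addr, len);
 *	}
 */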

struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
        unsigned long (*read_cr8)(void);
        void (*write_cr8)(unsigned long);
#endif

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
        void (*load_gs_index)(unsigned int idx);
#endif
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
        void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /*
         * Atomically enable interrupts and return to userspace.  This
         * is only ever used to return to 32-bit processes; in a
         * 64-bit kernel, it's used for 32-on-64 compat processes, but
         * never native 64-bit processes.  (Jump, not call.)
         */
        void (*irq_enable_sysexit)(void);

        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret.  Only used in 64-bit kernels to return to 64-bit
         * processes.  Usermode register state, including %rsp, must
         * already be restored.
         */
        void (*usergs_sysret64)(void);

        /*
         * Switch to usermode gs and return to 32-bit usermode using
         * sysret.  Used to return to 32-on-64 compat processes.
         * Other usermode register state, including %esp, must already
         * be restored.
         */
        void (*usergs_sysret32)(void);

        /* Normal iret.  Jump to this with the standard iret stack
           frame set up. */
        void (*iret)(void);

        void (*swapgs)(void);

        struct pv_lazy_ops lazy_mode;
};
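
/*
 * Backends normally start from the native ops and override only the
 * entries they care about; a sketch (my_io_delay is hypothetical):
 *
 *	static void my_io_delay(void)
 *	{
 *	}
 *
 *	pv_cpu_ops.io_delay = my_io_delay;
 *
 * (a hypervisor rarely needs the real port-0x80 delay, so an empty
 * override is plausible)
 */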

struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);

#ifdef CONFIG_X86_64
        void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI.  Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, u32 v);
        u32 (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);


        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating and freeing a pagetable top-level */
        int  (*pgd_alloc)(struct mm_struct *mm);
        void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

        /*
         * Hooks for allocating/releasing pagetable pages when they're
         * attached to a pagetable
         */
        void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
        void (*release_pte)(u32 pfn);
        void (*release_pmd)(u32 pfn);
        void (*release_pud)(u32 pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

        pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep);
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);

        pteval_t (*pte_val)(pte_t);
        pteval_t (*pte_flags)(pte_t);
        pte_t (*make_pte)(pteval_t pte);

        pgdval_t (*pgd_val)(pgd_t);
        pgd_t (*make_pgd)(pgdval_t pgd);

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

#endif  /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        pmdval_t (*pmd_val)(pmd_t);
        pmd_t (*make_pmd)(pmdval_t pmd);

#if PAGETABLE_LEVELS == 4
        pudval_t (*pud_val)(pud_t);
        pud_t (*make_pud)(pudval_t pud);

        void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif  /* PAGETABLE_LEVELS == 4 */
#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;

        /* dom0 ops */

        /* Sometimes the physical address is a pfn, and sometimes it's
           an mfn.  We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           unsigned long phys, pgprot_t flags);
};

struct raw_spinlock;
struct pv_lock_ops {
        int (*spin_is_locked)(struct raw_spinlock *lock);
        int (*spin_is_contended)(struct raw_spinlock *lock);
        void (*spin_lock)(struct raw_spinlock *lock);
        void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
        int (*spin_trylock)(struct raw_spinlock *lock);
        void (*spin_unlock)(struct raw_spinlock *lock);
};
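
/*
 * A backend wires these up at boot.  spin_lock_flags additionally
 * passes the caller's saved IRQ state, so a hypervisor can e.g.
 * re-enable interrupts while it spins.  An illustrative sketch
 * (names hypothetical):
 *
 *	static void my_spin_lock_flags(struct raw_spinlock *lock,
 *				       unsigned long flags)
 *	{
 *		my_spin_lock(lock);
 *	}
 *
 *	pv_lock_ops.spin_lock_flags = my_spin_lock_flags;
 *
 * (the simplest implementation just ignores the flags, as above)
 */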

/* This contains all the paravirt structures: we get a convenient
 * number for each function by using its offset into this template,
 * which we then use to indicate what to patch. */
struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
        struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
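
/*
 * For example, PARAVIRT_PATCH(pv_irq_ops.irq_disable) is the index of
 * the irq_disable pointer, counted in pointer-sized words from the
 * start of paravirt_patch_template; multiplying back by sizeof(void *)
 * recovers the structure offset, which is why the type number and the
 * offset convert freely into each other.
 */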

#define paravirt_type(op)                               \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
        [paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)               \
        [paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        _ASM_ALIGN "\n"                                 \
        _ASM_PTR " 771b\n"                              \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[], end_##ops##_##name[]; \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
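
/*
 * The native patchers use this roughly as follows (cf.
 * paravirt_patch_32.c):
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *
 * giving start/end labels around the raw instruction so that
 * paravirt_patch_insns() can copy it over a call site.
 */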

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL   "call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters passed in %rdi, %rsi,
 * %rdx, and %rcx.  For this reason, x86_64 needs no special handling
 * for 4-argument calls, unlike i386.  However, x86_64 also has to
 * clobber all caller-saved registers, which is unfortunately quite a
 * few of them (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  On x86_64, the return is always in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS                 unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS         unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)

#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)      BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)      ((void)op)
#endif

#define __PVOP_CALL(rettype, op, pre, post, ...)                        \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })
#define __PVOP_VCALL(op, pre, post, ...)                                \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : PVOP_VCALL_CLOBBERS                      \
                             : paravirt_type(op),                       \
                               paravirt_clobber(CLBR_ANY),              \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" VEXTRA_CLOBBERS);         \
        })

#define PVOP_CALL0(rettype, op)                                         \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)                                                 \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)                                   \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)                                           \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)                             \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)                                     \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))

/* PVOP_CALL4 is the only case that differs on x86_64, where it is much simpler. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
        "3"((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
        "3"((unsigned long)(arg4)))
#endif
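
/*
 * So, approximately, a wrapper such as
 *
 *	PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg)
 *
 * expands to an asm volatile("call *pv_cpu_ops.get_debugreg") with
 * "reg" forced into the first argument register, the result read back
 * from %eax (%rax on 64-bit), and the call's address and length
 * recorded in .parainstructions for later patching.
 */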

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP                      pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})
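
/*
 * Typical use, e.g. probing an MSR that may not be implemented
 * (illustrative):
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_EFER, &lo, &hi) == 0)
 *		... the MSR exists and its value is hi:lo ...
 */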

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
} while (0)
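
/*
 * For example (illustrative), reading the TSC together with the
 * contents of IA32_TSC_AUX (which the kernel initializes to the CPU
 * number):
 *
 *	unsigned long long t;
 *	unsigned int cpu;
 *
 *	rdtscpll(t, cpu);
 */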

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
        return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
                                            unsigned start, unsigned count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t,
                                 pv_mmu_ops.make_pte,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pteval_t,
                                 pv_mmu_ops.make_pte,
                                 val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
                                 pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
                                 pte.pte);

        return ret;
}

static inline pteval_t pte_flags(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
                                 pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
                                 pte.pte);

#ifdef CONFIG_PARAVIRT_DEBUG
        BUG_ON(ret & PTE_PFN_MASK);
#endif
        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
                                 val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
                                  pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
                                  pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
                                 val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
                                  pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
                                  pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
                                 val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
                                  pud.pud, (u64)pud.pud >> 32);
        else
                ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
                                  pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}


#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}
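
/*
 * Typical lazy-MMU use is to bracket a batch of pagetable updates so
 * that a hypervisor can queue them into a single hypercall
 * (illustrative):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		... set_pte_at(mm, addr, ptep, pte) ...
 *	arch_leave_lazy_mmu_mode();
 */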

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                unsigned long phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
#define paravirt_nop    ((void *)_paravirt_nop)
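
/*
 * Hooks with no useful native behaviour are set to paravirt_nop
 * rather than left NULL, so call sites need no NULL checks; the
 * patcher also recognises it and nops out the whole call site.
 */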

void paravirt_use_bytelocks(void);

#ifdef CONFIG_SMP

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
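
/*
 * apply_paravirt() walks this table; a simplified sketch of its core
 * loop (cf. arch/x86/kernel/alternative.c):
 *
 *	for (p = start; p < end; p++) {
 *		u8 insnbuf[MAX_PATCH_LEN];
 *		unsigned used;
 *
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 insnbuf, (unsigned long)p->instr,
 *					 p->len);
 *		... nop-pad insnbuf up to p->len and copy it over
 *		    p->instr ...
 *	}
 */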

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument parameter. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_VEXTRA_CLOBBERS);
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection


#ifdef CONFIG_X86_64
#define PV_SAVE_REGS                            \
        push %rax;                              \
        push %rcx;                              \
        push %rdx;                              \
        push %rsi;                              \
        push %rdi;                              \
        push %r8;                               \
        push %r9;                               \
        push %r10;                              \
        push %r11
#define PV_RESTORE_REGS                         \
        pop %r11;                               \
        pop %r10;                               \
        pop %r9;                                \
        pop %r8;                                \
        pop %rdi;                               \
        pop %rsi;                               \
        pop %rdx;                               \
        pop %rcx;                               \
        pop %rax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS;                                         \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS;                                         \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS;)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  PV_SAVE_REGS;                                         \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
                  PV_RESTORE_REGS                                       \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif  /* __ASM_PARAVIRT_H */