1 #ifndef __ASM_PARAVIRT_H
2 #define __ASM_PARAVIRT_H
3 /* Various instructions on x86 need to be replaced for
4  * para-virtualization: those hooks are defined here. */
5
6 #ifdef CONFIG_PARAVIRT
7 #include <asm/page.h>
8 #include <asm/asm.h>
9
10 /* Bitmask of what can be clobbered: usually at least eax. */
11 #define CLBR_NONE 0
12 #define CLBR_EAX  (1 << 0)
13 #define CLBR_ECX  (1 << 1)
14 #define CLBR_EDX  (1 << 2)
15
16 #ifdef CONFIG_X86_64
17 #define CLBR_RSI  (1 << 3)
18 #define CLBR_RDI  (1 << 4)
19 #define CLBR_R8   (1 << 5)
20 #define CLBR_R9   (1 << 6)
21 #define CLBR_R10  (1 << 7)
22 #define CLBR_R11  (1 << 8)
23 #define CLBR_ANY  ((1 << 9) - 1)
24 #include <asm/desc_defs.h>
25 #else
26 /* CLBR_ANY should match all regs the platform has.  For i386, that's just the three above. */
27 #define CLBR_ANY  ((1 << 3) - 1)
28 #endif /* X86_64 */
29
30 #ifndef __ASSEMBLY__
31 #include <linux/types.h>
32 #include <linux/cpumask.h>
33 #include <asm/kmap_types.h>
34 #include <asm/desc_defs.h>
35
36 struct page;
37 struct thread_struct;
38 struct desc_ptr;
39 struct tss_struct;
40 struct mm_struct;
41 struct desc_struct;
42
43 /* general info */
44 struct pv_info {
45         unsigned int kernel_rpl;
46         int shared_kernel_pmd;
47         int paravirt_enabled;
48         const char *name;
49 };
50
51 struct pv_init_ops {
52         /*
53          * Patch may replace one of the defined code sequences with
54          * arbitrary code, subject to the same register constraints.
55          * This generally means the code is not free to clobber any
56          * registers other than EAX.  The patch function should return
57          * the number of bytes of code generated, as we nop pad the
58          * rest in generic code.
59          */
60         unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
61                           unsigned long addr, unsigned len);
62
63         /* Basic arch-specific setup */
64         void (*arch_setup)(void);
65         char *(*memory_setup)(void);
66         void (*post_allocator_init)(void);
67
68         /* Print a banner to identify the environment */
69         void (*banner)(void);
70 };
71
72
73 struct pv_lazy_ops {
74         /* Set deferred update mode, used for batching operations. */
75         void (*enter)(void);
76         void (*leave)(void);
77 };
78
79 struct pv_time_ops {
80         void (*time_init)(void);
81
82         /* Get and set time of day */
83         unsigned long (*get_wallclock)(void);
84         int (*set_wallclock)(unsigned long);
85
86         unsigned long long (*sched_clock)(void);
87         unsigned long (*get_tsc_khz)(void);
88 };
89
90 struct pv_cpu_ops {
91         /* hooks for various privileged instructions */
92         unsigned long (*get_debugreg)(int regno);
93         void (*set_debugreg)(int regno, unsigned long value);
94
95         void (*clts)(void);
96
97         unsigned long (*read_cr0)(void);
98         void (*write_cr0)(unsigned long);
99
100         unsigned long (*read_cr4_safe)(void);
101         unsigned long (*read_cr4)(void);
102         void (*write_cr4)(unsigned long);
103
104 #ifdef CONFIG_X86_64
105         unsigned long (*read_cr8)(void);
106         void (*write_cr8)(unsigned long);
107 #endif
108
109         /* Segment descriptor handling */
110         void (*load_tr_desc)(void);
111         void (*load_gdt)(const struct desc_ptr *);
112         void (*load_idt)(const struct desc_ptr *);
113         void (*store_gdt)(struct desc_ptr *);
114         void (*store_idt)(struct desc_ptr *);
115         void (*set_ldt)(const void *desc, unsigned entries);
116         unsigned long (*store_tr)(void);
117         void (*load_tls)(struct thread_struct *t, unsigned int cpu);
118 #ifdef CONFIG_X86_64
119         void (*load_gs_index)(unsigned int idx);
120 #endif
121         void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
122                                 const void *desc);
123         void (*write_gdt_entry)(struct desc_struct *,
124                                 int entrynum, const void *desc, int size);
125         void (*write_idt_entry)(gate_desc *,
126                                 int entrynum, const gate_desc *gate);
127         void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
128         void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
129
130         void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
131
132         void (*set_iopl_mask)(unsigned mask);
133
134         void (*wbinvd)(void);
135         void (*io_delay)(void);
136
137         /* cpuid emulation, mostly so that caps bits can be disabled */
138         void (*cpuid)(unsigned int *eax, unsigned int *ebx,
139                       unsigned int *ecx, unsigned int *edx);
140
141         /* MSR, PMC and TSC operations.
142            err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
143         u64 (*read_msr)(unsigned int msr, int *err);
144         int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
145
146         u64 (*read_tsc)(void);
147         u64 (*read_pmc)(int counter);
148         unsigned long long (*read_tscp)(unsigned int *aux);
149
150         /*
151          * Atomically enable interrupts and return to userspace.  This
152          * is only ever used to return to 32-bit processes; in a
153          * 64-bit kernel, it's used for 32-on-64 compat processes, but
154          * never native 64-bit processes.  (Jump, not call.)
155          */
156         void (*irq_enable_sysexit)(void);
157
158         /*
159          * Switch to usermode gs and return to 64-bit usermode using
160          * sysret.  Only used in 64-bit kernels to return to 64-bit
161          * processes.  Usermode register state, including %rsp, must
162          * already be restored.
163          */
164         void (*usergs_sysret64)(void);
165
166         /*
167          * Switch to usermode gs and return to 32-bit usermode using
168          * sysret.  Used to return to 32-on-64 compat processes.
169          * Other usermode register state, including %esp, must already
170          * be restored.
171          */
172         void (*usergs_sysret32)(void);
173
174         /* Normal iret.  Jump to this with the standard iret stack
175            frame set up. */
176         void (*iret)(void);
177
178         void (*swapgs)(void);
179
180         struct pv_lazy_ops lazy_mode;
181 };
182
183 struct pv_irq_ops {
184         void (*init_IRQ)(void);
185
186         /*
187          * Get/set interrupt state.  save_fl and restore_fl are only
188          * expected to use X86_EFLAGS_IF; all other bits
189          * returned from save_fl are undefined, and may be ignored by
190          * restore_fl.
191          */
192         unsigned long (*save_fl)(void);
193         void (*restore_fl)(unsigned long);
194         void (*irq_disable)(void);
195         void (*irq_enable)(void);
196         void (*safe_halt)(void);
197         void (*halt)(void);
198
199 #ifdef CONFIG_X86_64
200         void (*adjust_exception_frame)(void);
201 #endif
202 };
203
204 struct pv_apic_ops {
205 #ifdef CONFIG_X86_LOCAL_APIC
206         /*
207          * Direct APIC operations, principally for VMI.  Ideally
208          * these shouldn't be in this interface.
209          */
210         void (*apic_write)(unsigned long reg, u32 v);
211         u32 (*apic_read)(unsigned long reg);
212         void (*setup_boot_clock)(void);
213         void (*setup_secondary_clock)(void);
214
215         void (*startup_ipi_hook)(int phys_apicid,
216                                  unsigned long start_eip,
217                                  unsigned long start_esp);
218 #endif
219 };
220
221 struct pv_mmu_ops {
222         /*
223          * Called before/after init_mm pagetable setup. setup_start
224          * may reset %cr3, and may pre-install parts of the pagetable;
225          * pagetable setup is expected to preserve any existing
226          * mapping.
227          */
228         void (*pagetable_setup_start)(pgd_t *pgd_base);
229         void (*pagetable_setup_done)(pgd_t *pgd_base);
230
231         unsigned long (*read_cr2)(void);
232         void (*write_cr2)(unsigned long);
233
234         unsigned long (*read_cr3)(void);
235         void (*write_cr3)(unsigned long);
236
237         /*
238          * Hooks for intercepting the creation/use/destruction of an
239          * mm_struct.
240          */
241         void (*activate_mm)(struct mm_struct *prev,
242                             struct mm_struct *next);
243         void (*dup_mmap)(struct mm_struct *oldmm,
244                          struct mm_struct *mm);
245         void (*exit_mmap)(struct mm_struct *mm);
246
247
248         /* TLB operations */
249         void (*flush_tlb_user)(void);
250         void (*flush_tlb_kernel)(void);
251         void (*flush_tlb_single)(unsigned long addr);
252         void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
253                                  unsigned long va);
254
255         /* Hooks for allocating and freeing a pagetable top-level */
256         int  (*pgd_alloc)(struct mm_struct *mm);
257         void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
258
259         /*
260          * Hooks for allocating/releasing pagetable pages when they're
261          * attached to a pagetable
262          */
263         void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
264         void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
265         void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
266         void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
267         void (*release_pte)(u32 pfn);
268         void (*release_pmd)(u32 pfn);
269         void (*release_pud)(u32 pfn);
270
271         /* Pagetable manipulation functions */
272         void (*set_pte)(pte_t *ptep, pte_t pteval);
273         void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
274                            pte_t *ptep, pte_t pteval);
275         void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
276         void (*pte_update)(struct mm_struct *mm, unsigned long addr,
277                            pte_t *ptep);
278         void (*pte_update_defer)(struct mm_struct *mm,
279                                  unsigned long addr, pte_t *ptep);
280
281         pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
282                                         pte_t *ptep);
283         void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
284                                         pte_t *ptep, pte_t pte);
285
286         pteval_t (*pte_val)(pte_t);
287         pteval_t (*pte_flags)(pte_t);
288         pte_t (*make_pte)(pteval_t pte);
289
290         pgdval_t (*pgd_val)(pgd_t);
291         pgd_t (*make_pgd)(pgdval_t pgd);
292
293 #if PAGETABLE_LEVELS >= 3
294 #ifdef CONFIG_X86_PAE
295         void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
296         void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
297                                 pte_t *ptep, pte_t pte);
298         void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
299                           pte_t *ptep);
300         void (*pmd_clear)(pmd_t *pmdp);
301
302 #endif  /* CONFIG_X86_PAE */
303
304         void (*set_pud)(pud_t *pudp, pud_t pudval);
305
306         pmdval_t (*pmd_val)(pmd_t);
307         pmd_t (*make_pmd)(pmdval_t pmd);
308
309 #if PAGETABLE_LEVELS == 4
310         pudval_t (*pud_val)(pud_t);
311         pud_t (*make_pud)(pudval_t pud);
312
313         void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
314 #endif  /* PAGETABLE_LEVELS == 4 */
315 #endif  /* PAGETABLE_LEVELS >= 3 */
316
317 #ifdef CONFIG_HIGHPTE
318         void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
319 #endif
320
321         struct pv_lazy_ops lazy_mode;
322
323         /* dom0 ops */
324
325         /* Sometimes the physical address is a pfn, and sometimes it's
326            an mfn.  We can tell which is which from the index. */
327         void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
328                            unsigned long phys, pgprot_t flags);
329 };
330
331 struct raw_spinlock;
332 struct pv_lock_ops {
333         int (*spin_is_locked)(struct raw_spinlock *lock);
334         int (*spin_is_contended)(struct raw_spinlock *lock);
335         void (*spin_lock)(struct raw_spinlock *lock);
336         int (*spin_trylock)(struct raw_spinlock *lock);
337         void (*spin_unlock)(struct raw_spinlock *lock);
338 };
339
340 /* This contains all the paravirt structures: we get a convenient
341  * number for each function using the offset which we use to indicate
342  * what to patch. */
343 struct paravirt_patch_template {
344         struct pv_init_ops pv_init_ops;
345         struct pv_time_ops pv_time_ops;
346         struct pv_cpu_ops pv_cpu_ops;
347         struct pv_irq_ops pv_irq_ops;
348         struct pv_apic_ops pv_apic_ops;
349         struct pv_mmu_ops pv_mmu_ops;
350         struct pv_lock_ops pv_lock_ops;
351 };
352
353 extern struct pv_info pv_info;
354 extern struct pv_init_ops pv_init_ops;
355 extern struct pv_time_ops pv_time_ops;
356 extern struct pv_cpu_ops pv_cpu_ops;
357 extern struct pv_irq_ops pv_irq_ops;
358 extern struct pv_apic_ops pv_apic_ops;
359 extern struct pv_mmu_ops pv_mmu_ops;
360 extern struct pv_lock_ops pv_lock_ops;
361
362 #define PARAVIRT_PATCH(x)                                       \
363         (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
364
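/*
 * Illustrative sketch (not part of this header): because the type
 * number is just the pointer-sized slot index within
 * paravirt_patch_template, it can be mapped back to a byte offset,
 * the inverse of PARAVIRT_PATCH.
 */
static inline unsigned long paravirt_type_to_offset(unsigned type)
{
        return (unsigned long)type * sizeof(void *);
}
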
365 #define paravirt_type(op)                               \
366         [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
367         [paravirt_opptr] "m" (op)
368 #define paravirt_clobber(clobber)               \
369         [paravirt_clobber] "i" (clobber)
370
371 /*
372  * Generate some code, and mark it as patchable by the
373  * apply_paravirt() alternate instruction patcher.
374  */
375 #define _paravirt_alt(insn_string, type, clobber)       \
376         "771:\n\t" insn_string "\n" "772:\n"            \
377         ".pushsection .parainstructions,\"a\"\n"        \
378         _ASM_ALIGN "\n"                                 \
379         _ASM_PTR " 771b\n"                              \
380         "  .byte " type "\n"                            \
381         "  .byte 772b-771b\n"                           \
382         "  .short " clobber "\n"                        \
383         ".popsection\n"
384
385 /* Generate patchable code, with the default asm parameters. */
386 #define paravirt_alt(insn_string)                                       \
387         _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
388
389 /* Simple instruction patching code. */
390 #define DEF_NATIVE(ops, name, code)                                     \
391         extern const char start_##ops##_##name[], end_##ops##_##name[]; \
392         asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
393
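/*
 * Usage sketch: a native backend defines its patchable instruction
 * sequences with DEF_NATIVE and later copies them into a call site
 * with paravirt_patch_insns(); for example, the 32-bit native patcher
 * (arch/x86/kernel/paravirt_patch_32.c) contains entries like:
 *
 *      DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *      DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *
 * (Shown as a comment here since DEF_NATIVE emits symbols and must
 * live in exactly one translation unit.)
 */
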
394 unsigned paravirt_patch_nop(void);
395 unsigned paravirt_patch_ignore(unsigned len);
396 unsigned paravirt_patch_call(void *insnbuf,
397                              const void *target, u16 tgt_clobbers,
398                              unsigned long addr, u16 site_clobbers,
399                              unsigned len);
400 unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
401                             unsigned long addr, unsigned len);
402 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
403                                 unsigned long addr, unsigned len);
404
405 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
406                               const char *start, const char *end);
407
408 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
409                       unsigned long addr, unsigned len);
410
411 int paravirt_disable_iospace(void);
412
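/*
 * Sketch of a backend patch hook (hypothetical; hv_patch is not in
 * this tree): a hypervisor that does no inline patching of its own
 * can defer every site to paravirt_patch_default(), which emits a
 * direct call (or jmp, for the return-to-usermode ops) to whatever
 * function is currently installed for that slot.
 */
static inline unsigned hv_patch(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len)
{
        return paravirt_patch_default(type, clobbers, insnbuf, addr, len);
}
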
413 /*
414  * This generates an indirect call based on the operation type number.
415  * The type number, computed in PARAVIRT_PATCH, is derived from the
416  * offset into the paravirt_patch_template structure, and can therefore be
417  * freely converted back into a structure offset.
418  */
419 #define PARAVIRT_CALL   "call *%[paravirt_opptr];"
420
421 /*
422  * These macros are intended to wrap calls through one of the paravirt
423  * ops structs, so that they can be later identified and patched at
424  * runtime.
425  *
426  * Normally, a call to a pv_op function is a simple indirect call:
427  * (pv_op_struct.operations)(args...).
428  *
429  * Unfortunately, this is a relatively slow operation for modern CPUs,
430  * because it cannot necessarily determine what the destination
431  * address is.  In this case, the address is a runtime constant, so at
432  * the very least we can patch the call to be a simple direct call, or
433  * ideally, patch an inline implementation into the callsite.  (Direct
434  * calls are essentially free, because the call and return addresses
435  * are completely predictable.)
436  *
437  * For i386, these macros rely on the standard gcc "regparm(3)" calling
438  * convention, in which the first three arguments are placed in %eax,
439  * %edx, %ecx (in that order), and the remaining arguments are placed
440  * on the stack.  All caller-save registers (eax,edx,ecx) are expected
441  * to be modified (either clobbered or used for return values).
442  * X86_64, on the other hand, already specifies a register-based calling
443  * convention, returning in %rax, with parameters passed in %rdi, %rsi,
444  * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
445  * special handling for dealing with 4 arguments, unlike i386.
446  * However, x86_64 also has to clobber all caller-saved registers, which
447  * unfortunately is quite a few of them (r8 - r11).
448  *
449  * The call instruction itself is marked by placing its start address
450  * and size into the .parainstructions section, so that
451  * apply_paravirt() in arch/i386/kernel/alternative.c can do the
452  * appropriate patching under the control of the backend pv_init_ops
453  * implementation.
454  *
455  * Unfortunately there's no way to get gcc to generate the args setup
456  * for the call, and then allow the call itself to be generated by an
457  * inline asm.  Because of this, we must do the complete arg setup and
458  * return value handling from within these macros.  This is fairly
459  * cumbersome.
460  *
461  * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
462  * It could be extended to more arguments, but there would be little
463  * to be gained from that.  For each number of arguments, there are
464  * the two VCALL and CALL variants for void and non-void functions.
465  *
466  * When there is a return value, the invoker of the macro must specify
467  * the return type.  The macro then uses sizeof() on that type to
468  * determine whether it's a 32- or 64-bit value, and places the return
469  * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
470  * 64-bit). For x86_64 machines, it just returns in %rax regardless of
471  * the return value size.
472  *
473  * On i386, 64-bit arguments are passed as a pair of adjacent
474  * 32-bit arguments, in low,high order; x86_64 passes a 64-bit
475  * argument in a single register.
476  *
477  * Small structures are passed and returned in registers.  The macro
478  * calling convention can't directly deal with this, so the wrapper
479  * functions must do this.
480  *
481  * These PVOP_* macros are only defined within this header.  This
482  * means that all uses must be wrapped in inline functions.  This also
483  * makes sure the incoming and outgoing types are always correct.
484  */
485 #ifdef CONFIG_X86_32
486 #define PVOP_VCALL_ARGS                 unsigned long __eax, __edx, __ecx
487 #define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS
488 #define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
489                                         "=c" (__ecx)
490 #define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS
491 #define EXTRA_CLOBBERS
492 #define VEXTRA_CLOBBERS
493 #else
494 #define PVOP_VCALL_ARGS         unsigned long __edi, __esi, __edx, __ecx
495 #define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
496 #define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
497                                 "=S" (__esi), "=d" (__edx),             \
498                                 "=c" (__ecx)
499
500 #define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)
501
502 #define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
503 #define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
504 #endif
505
506 #ifdef CONFIG_PARAVIRT_DEBUG
507 #define PVOP_TEST_NULL(op)      BUG_ON(op == NULL)
508 #else
509 #define PVOP_TEST_NULL(op)      ((void)op)
510 #endif
511
512 #define __PVOP_CALL(rettype, op, pre, post, ...)                        \
513         ({                                                              \
514                 rettype __ret;                                          \
515                 PVOP_CALL_ARGS;                                 \
516                 PVOP_TEST_NULL(op);                                     \
517                 /* This is 32-bit specific, but is okay in 64-bit */    \
518                 /* since this condition will never hold */              \
519                 if (sizeof(rettype) > sizeof(unsigned long)) {          \
520                         asm volatile(pre                                \
521                                      paravirt_alt(PARAVIRT_CALL)        \
522                                      post                               \
523                                      : PVOP_CALL_CLOBBERS               \
524                                      : paravirt_type(op),               \
525                                        paravirt_clobber(CLBR_ANY),      \
526                                        ##__VA_ARGS__                    \
527                                      : "memory", "cc" EXTRA_CLOBBERS);  \
528                         __ret = (rettype)((((u64)__edx) << 32) | __eax); \
529                 } else {                                                \
530                         asm volatile(pre                                \
531                                      paravirt_alt(PARAVIRT_CALL)        \
532                                      post                               \
533                                      : PVOP_CALL_CLOBBERS               \
534                                      : paravirt_type(op),               \
535                                        paravirt_clobber(CLBR_ANY),      \
536                                        ##__VA_ARGS__                    \
537                                      : "memory", "cc" EXTRA_CLOBBERS);  \
538                         __ret = (rettype)__eax;                         \
539                 }                                                       \
540                 __ret;                                                  \
541         })
542 #define __PVOP_VCALL(op, pre, post, ...)                                \
543         ({                                                              \
544                 PVOP_VCALL_ARGS;                                        \
545                 PVOP_TEST_NULL(op);                                     \
546                 asm volatile(pre                                        \
547                              paravirt_alt(PARAVIRT_CALL)                \
548                              post                                       \
549                              : PVOP_VCALL_CLOBBERS                      \
550                              : paravirt_type(op),                       \
551                                paravirt_clobber(CLBR_ANY),              \
552                                ##__VA_ARGS__                            \
553                              : "memory", "cc" VEXTRA_CLOBBERS);         \
554         })
555
556 #define PVOP_CALL0(rettype, op)                                         \
557         __PVOP_CALL(rettype, op, "", "")
558 #define PVOP_VCALL0(op)                                                 \
559         __PVOP_VCALL(op, "", "")
560
561 #define PVOP_CALL1(rettype, op, arg1)                                   \
562         __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
563 #define PVOP_VCALL1(op, arg1)                                           \
564         __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
565
566 #define PVOP_CALL2(rettype, op, arg1, arg2)                             \
567         __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
568         "1" ((unsigned long)(arg2)))
569 #define PVOP_VCALL2(op, arg1, arg2)                                     \
570         __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
571         "1" ((unsigned long)(arg2)))
572
573 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
574         __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
575         "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
576 #define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
577         __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
578         "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
579
580 /* This is the only case that differs on x86_64, where we can make it much simpler */
581 #ifdef CONFIG_X86_32
582 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
583         __PVOP_CALL(rettype, op,                                        \
584                     "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
585                     "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
586                     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
587 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
588         __PVOP_VCALL(op,                                                \
589                     "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
590                     "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
591                     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
592 #else
593 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
594         __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
595         "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
596         "3"((unsigned long)(arg4)))
597 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
598         __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
599         "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
600         "3"((unsigned long)(arg4)))
601 #endif
602
603 static inline int paravirt_enabled(void)
604 {
605         return pv_info.paravirt_enabled;
606 }
607
608 static inline void load_sp0(struct tss_struct *tss,
609                              struct thread_struct *thread)
610 {
611         PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
612 }
613
614 #define ARCH_SETUP                      pv_init_ops.arch_setup();
615 static inline unsigned long get_wallclock(void)
616 {
617         return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
618 }
619
620 static inline int set_wallclock(unsigned long nowtime)
621 {
622         return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
623 }
624
625 static inline void (*choose_time_init(void))(void)
626 {
627         return pv_time_ops.time_init;
628 }
629
630 /* The paravirtualized CPUID instruction. */
631 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
632                            unsigned int *ecx, unsigned int *edx)
633 {
634         PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
635 }
636
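/*
 * Sketch of a backend cpuid hook (hypothetical, not in this tree):
 * run the underlying cpuid, then hide a capability from the kernel by
 * clearing its feature bit.  native_cpuid() is assumed to be visible
 * from <asm/processor.h>.
 */
static inline void example_cpuid_mask_tsc(unsigned int *eax, unsigned int *ebx,
                                          unsigned int *ecx, unsigned int *edx)
{
        unsigned int leaf = *eax;

        native_cpuid(eax, ebx, ecx, edx);
        if (leaf == 1)
                *edx &= ~(1 << 4);      /* clear the leaf-1 TSC feature bit */
}
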
637 /*
638  * These special macros can be used to get or set a debugging register
639  */
640 static inline unsigned long paravirt_get_debugreg(int reg)
641 {
642         return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
643 }
644 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
645 static inline void set_debugreg(unsigned long val, int reg)
646 {
647         PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
648 }
649
650 static inline void clts(void)
651 {
652         PVOP_VCALL0(pv_cpu_ops.clts);
653 }
654
655 static inline unsigned long read_cr0(void)
656 {
657         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
658 }
659
660 static inline void write_cr0(unsigned long x)
661 {
662         PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
663 }
664
665 static inline unsigned long read_cr2(void)
666 {
667         return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
668 }
669
670 static inline void write_cr2(unsigned long x)
671 {
672         PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
673 }
674
675 static inline unsigned long read_cr3(void)
676 {
677         return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
678 }
679
680 static inline void write_cr3(unsigned long x)
681 {
682         PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
683 }
684
685 static inline unsigned long read_cr4(void)
686 {
687         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
688 }
689 static inline unsigned long read_cr4_safe(void)
690 {
691         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
692 }
693
694 static inline void write_cr4(unsigned long x)
695 {
696         PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
697 }
698
699 #ifdef CONFIG_X86_64
700 static inline unsigned long read_cr8(void)
701 {
702         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
703 }
704
705 static inline void write_cr8(unsigned long x)
706 {
707         PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
708 }
709 #endif
710
711 static inline void raw_safe_halt(void)
712 {
713         PVOP_VCALL0(pv_irq_ops.safe_halt);
714 }
715
716 static inline void halt(void)
717 {
718         PVOP_VCALL0(pv_irq_ops.halt);
719 }
720
721 static inline void wbinvd(void)
722 {
723         PVOP_VCALL0(pv_cpu_ops.wbinvd);
724 }
725
726 #define get_kernel_rpl()  (pv_info.kernel_rpl)
727
728 static inline u64 paravirt_read_msr(unsigned msr, int *err)
729 {
730         return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
731 }
732 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
733 {
734         return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
735 }
736
737 /* These should all do BUG_ON(_err), but our headers are too tangled. */
738 #define rdmsr(msr, val1, val2)                  \
739 do {                                            \
740         int _err;                               \
741         u64 _l = paravirt_read_msr(msr, &_err); \
742         val1 = (u32)_l;                         \
743         val2 = _l >> 32;                        \
744 } while (0)
745
746 #define wrmsr(msr, val1, val2)                  \
747 do {                                            \
748         paravirt_write_msr(msr, val1, val2);    \
749 } while (0)
750
751 #define rdmsrl(msr, val)                        \
752 do {                                            \
753         int _err;                               \
754         val = paravirt_read_msr(msr, &_err);    \
755 } while (0)
756
757 #define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
758 #define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)
759
760 /* rdmsr with exception handling */
761 #define rdmsr_safe(msr, a, b)                   \
762 ({                                              \
763         int _err;                               \
764         u64 _l = paravirt_read_msr(msr, &_err); \
765         (*a) = (u32)_l;                         \
766         (*b) = _l >> 32;                        \
767         _err;                                   \
768 })
769
770 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
771 {
772         int err;
773
774         *p = paravirt_read_msr(msr, &err);
775         return err;
776 }
777
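/*
 * Usage sketch (illustrative, not part of this header): reading an
 * MSR with fault handling.  The MSR_EFER index is assumed to come
 * from the MSR index definitions (<asm/msr-index.h>).
 */
static inline int example_read_efer(u64 *efer)
{
        u32 lo, hi;
        int err = rdmsr_safe(MSR_EFER, &lo, &hi);

        *efer = ((u64)hi << 32) | lo;
        return err;
}
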
778 static inline u64 paravirt_read_tsc(void)
779 {
780         return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
781 }
782
783 #define rdtscl(low)                             \
784 do {                                            \
785         u64 _l = paravirt_read_tsc();           \
786         low = (int)_l;                          \
787 } while (0)
788
789 #define rdtscll(val) (val = paravirt_read_tsc())
790
791 static inline unsigned long long paravirt_sched_clock(void)
792 {
793         return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
794 }
795 #define calibrate_tsc() (pv_time_ops.get_tsc_khz())
796
797 static inline unsigned long long paravirt_read_pmc(int counter)
798 {
799         return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
800 }
801
802 #define rdpmc(counter, low, high)               \
803 do {                                            \
804         u64 _l = paravirt_read_pmc(counter);    \
805         low = (u32)_l;                          \
806         high = _l >> 32;                        \
807 } while (0)
808
809 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
810 {
811         return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
812 }
813
814 #define rdtscp(low, high, aux)                          \
815 do {                                                    \
816         unsigned int __aux;                             \
817         unsigned long __val = paravirt_rdtscp(&__aux);  \
818         (low) = (u32)__val;                             \
819         (high) = (u32)(__val >> 32);                    \
820         (aux) = __aux;                                  \
821 } while (0)
822
823 #define rdtscpll(val, aux)                              \
824 do {                                                    \
825         unsigned long __aux;                            \
826         val = paravirt_rdtscp(&__aux);                  \
827         (aux) = __aux;                                  \
828 } while (0)
829
830 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
831 {
832         PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
833 }
834
835 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
836 {
837         PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
838 }
839
840 static inline void load_TR_desc(void)
841 {
842         PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
843 }
844 static inline void load_gdt(const struct desc_ptr *dtr)
845 {
846         PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
847 }
848 static inline void load_idt(const struct desc_ptr *dtr)
849 {
850         PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
851 }
852 static inline void set_ldt(const void *addr, unsigned entries)
853 {
854         PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
855 }
856 static inline void store_gdt(struct desc_ptr *dtr)
857 {
858         PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
859 }
860 static inline void store_idt(struct desc_ptr *dtr)
861 {
862         PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
863 }
864 static inline unsigned long paravirt_store_tr(void)
865 {
866         return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
867 }
868 #define store_tr(tr)    ((tr) = paravirt_store_tr())
869 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
870 {
871         PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
872 }
873
874 #ifdef CONFIG_X86_64
875 static inline void load_gs_index(unsigned int gs)
876 {
877         PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
878 }
879 #endif
880
881 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
882                                    const void *desc)
883 {
884         PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
885 }
886
887 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
888                                    void *desc, int type)
889 {
890         PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
891 }
892
893 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
894 {
895         PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
896 }
897 static inline void set_iopl_mask(unsigned mask)
898 {
899         PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
900 }
901
902 /* The paravirtualized I/O functions */
903 static inline void slow_down_io(void)
904 {
905         pv_cpu_ops.io_delay();
906 #ifdef REALLY_SLOW_IO
907         pv_cpu_ops.io_delay();
908         pv_cpu_ops.io_delay();
909         pv_cpu_ops.io_delay();
910 #endif
911 }
912
913 #ifdef CONFIG_X86_LOCAL_APIC
914 /*
915  * Basic functions accessing APICs.
916  */
917 static inline void apic_write(unsigned long reg, u32 v)
918 {
919         PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
920 }
921
922 static inline u32 apic_read(unsigned long reg)
923 {
924         return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
925 }
926
927 static inline void setup_boot_clock(void)
928 {
929         PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
930 }
931
932 static inline void setup_secondary_clock(void)
933 {
934         PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
935 }
936 #endif
937
938 static inline void paravirt_post_allocator_init(void)
939 {
940         if (pv_init_ops.post_allocator_init)
941                 (*pv_init_ops.post_allocator_init)();
942 }
943
944 static inline void paravirt_pagetable_setup_start(pgd_t *base)
945 {
946         (*pv_mmu_ops.pagetable_setup_start)(base);
947 }
948
949 static inline void paravirt_pagetable_setup_done(pgd_t *base)
950 {
951         (*pv_mmu_ops.pagetable_setup_done)(base);
952 }
953
954 #ifdef CONFIG_SMP
955 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
956                                     unsigned long start_esp)
957 {
958         PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
959                     phys_apicid, start_eip, start_esp);
960 }
961 #endif
962
963 static inline void paravirt_activate_mm(struct mm_struct *prev,
964                                         struct mm_struct *next)
965 {
966         PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
967 }
968
969 static inline void arch_dup_mmap(struct mm_struct *oldmm,
970                                  struct mm_struct *mm)
971 {
972         PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
973 }
974
975 static inline void arch_exit_mmap(struct mm_struct *mm)
976 {
977         PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
978 }
979
980 static inline void __flush_tlb(void)
981 {
982         PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
983 }
984 static inline void __flush_tlb_global(void)
985 {
986         PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
987 }
988 static inline void __flush_tlb_single(unsigned long addr)
989 {
990         PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
991 }
992
993 static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
994                                     unsigned long va)
995 {
996         PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
997 }
998
999 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
1000 {
1001         return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
1002 }
1003
1004 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1005 {
1006         PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1007 }
1008
1009 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
1010 {
1011         PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
1012 }
1013 static inline void paravirt_release_pte(unsigned pfn)
1014 {
1015         PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
1016 }
1017
1018 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
1019 {
1020         PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
1021 }
1022
1023 static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
1024                                             unsigned start, unsigned count)
1025 {
1026         PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
1027 }
1028 static inline void paravirt_release_pmd(unsigned pfn)
1029 {
1030         PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
1031 }
1032
1033 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
1034 {
1035         PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1036 }
1037 static inline void paravirt_release_pud(unsigned pfn)
1038 {
1039         PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1040 }
1041
1042 #ifdef CONFIG_HIGHPTE
1043 static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
1044 {
1045         unsigned long ret;
1046         ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
1047         return (void *)ret;
1048 }
1049 #endif
1050
1051 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
1052                               pte_t *ptep)
1053 {
1054         PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
1055 }
1056
1057 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
1058                                     pte_t *ptep)
1059 {
1060         PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
1061 }
1062
1063 static inline pte_t __pte(pteval_t val)
1064 {
1065         pteval_t ret;
1066
1067         if (sizeof(pteval_t) > sizeof(long))
1068                 ret = PVOP_CALL2(pteval_t,
1069                                  pv_mmu_ops.make_pte,
1070                                  val, (u64)val >> 32);
1071         else
1072                 ret = PVOP_CALL1(pteval_t,
1073                                  pv_mmu_ops.make_pte,
1074                                  val);
1075
1076         return (pte_t) { .pte = ret };
1077 }
1078
1079 static inline pteval_t pte_val(pte_t pte)
1080 {
1081         pteval_t ret;
1082
1083         if (sizeof(pteval_t) > sizeof(long))
1084                 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
1085                                  pte.pte, (u64)pte.pte >> 32);
1086         else
1087                 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
1088                                  pte.pte);
1089
1090         return ret;
1091 }
1092
1093 static inline pteval_t pte_flags(pte_t pte)
1094 {
1095         pteval_t ret;
1096
1097         if (sizeof(pteval_t) > sizeof(long))
1098                 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1099                                  pte.pte, (u64)pte.pte >> 32);
1100         else
1101                 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
1102                                  pte.pte);
1103
1104 #ifdef CONFIG_PARAVIRT_DEBUG
1105         BUG_ON(ret & PTE_PFN_MASK);
1106 #endif
1107         return ret;
1108 }
1109
1110 static inline pgd_t __pgd(pgdval_t val)
1111 {
1112         pgdval_t ret;
1113
1114         if (sizeof(pgdval_t) > sizeof(long))
1115                 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
1116                                  val, (u64)val >> 32);
1117         else
1118                 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
1119                                  val);
1120
1121         return (pgd_t) { ret };
1122 }
1123
1124 static inline pgdval_t pgd_val(pgd_t pgd)
1125 {
1126         pgdval_t ret;
1127
1128         if (sizeof(pgdval_t) > sizeof(long))
1129                 ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
1130                                   pgd.pgd, (u64)pgd.pgd >> 32);
1131         else
1132                 ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
1133                                   pgd.pgd);
1134
1135         return ret;
1136 }
1137
1138 #define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1139 static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
1140                                            pte_t *ptep)
1141 {
1142         pteval_t ret;
1143
1144         ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1145                          mm, addr, ptep);
1146
1147         return (pte_t) { .pte = ret };
1148 }
1149
1150 static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1151                                            pte_t *ptep, pte_t pte)
1152 {
1153         if (sizeof(pteval_t) > sizeof(long))
1154                 /* 5 arg words */
1155                 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1156         else
1157                 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1158                             mm, addr, ptep, pte.pte);
1159 }
1160
1161 static inline void set_pte(pte_t *ptep, pte_t pte)
1162 {
1163         if (sizeof(pteval_t) > sizeof(long))
1164                 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
1165                             pte.pte, (u64)pte.pte >> 32);
1166         else
1167                 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
1168                             pte.pte);
1169 }
1170
1171 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1172                               pte_t *ptep, pte_t pte)
1173 {
1174         if (sizeof(pteval_t) > sizeof(long))
1175                 /* 5 arg words */
1176                 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
1177         else
1178                 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
1179 }
1180
1181 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
1182 {
1183         pmdval_t val = native_pmd_val(pmd);
1184
1185         if (sizeof(pmdval_t) > sizeof(long))
1186                 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
1187         else
1188                 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
1189 }
1190
1191 #if PAGETABLE_LEVELS >= 3
1192 static inline pmd_t __pmd(pmdval_t val)
1193 {
1194         pmdval_t ret;
1195
1196         if (sizeof(pmdval_t) > sizeof(long))
1197                 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
1198                                  val, (u64)val >> 32);
1199         else
1200                 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
1201                                  val);
1202
1203         return (pmd_t) { ret };
1204 }
1205
1206 static inline pmdval_t pmd_val(pmd_t pmd)
1207 {
1208         pmdval_t ret;
1209
1210         if (sizeof(pmdval_t) > sizeof(long))
1211                 ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
1212                                   pmd.pmd, (u64)pmd.pmd >> 32);
1213         else
1214                 ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
1215                                   pmd.pmd);
1216
1217         return ret;
1218 }
1219
1220 static inline void set_pud(pud_t *pudp, pud_t pud)
1221 {
1222         pudval_t val = native_pud_val(pud);
1223
1224         if (sizeof(pudval_t) > sizeof(long))
1225                 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
1226                             val, (u64)val >> 32);
1227         else
1228                 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
1229                             val);
1230 }
1231 #if PAGETABLE_LEVELS == 4
1232 static inline pud_t __pud(pudval_t val)
1233 {
1234         pudval_t ret;
1235
1236         if (sizeof(pudval_t) > sizeof(long))
1237                 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
1238                                  val, (u64)val >> 32);
1239         else
1240                 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
1241                                  val);
1242
1243         return (pud_t) { ret };
1244 }
1245
1246 static inline pudval_t pud_val(pud_t pud)
1247 {
1248         pudval_t ret;
1249
1250         if (sizeof(pudval_t) > sizeof(long))
1251                 ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
1252                                   pud.pud, (u64)pud.pud >> 32);
1253         else
1254                 ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
1255                                   pud.pud);
1256
1257         return ret;
1258 }
1259
1260 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1261 {
1262         pgdval_t val = native_pgd_val(pgd);
1263
1264         if (sizeof(pgdval_t) > sizeof(long))
1265                 PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
1266                             val, (u64)val >> 32);
1267         else
1268                 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
1269                             val);
1270 }
1271
1272 static inline void pgd_clear(pgd_t *pgdp)
1273 {
1274         set_pgd(pgdp, __pgd(0));
1275 }
1276
1277 static inline void pud_clear(pud_t *pudp)
1278 {
1279         set_pud(pudp, __pud(0));
1280 }
1281
1282 #endif  /* PAGETABLE_LEVELS == 4 */
1283
1284 #endif  /* PAGETABLE_LEVELS >= 3 */
1285
1286 #ifdef CONFIG_X86_PAE
1287 /* Special-case pte-setting operations for PAE, which can't update a
1288    64-bit pte atomically */
1289 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1290 {
1291         PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
1292                     pte.pte, pte.pte >> 32);
1293 }
1294
1295 static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1296                                    pte_t *ptep, pte_t pte)
1297 {
1298         /* 5 arg words */
1299         pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
1300 }
1301
1302 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1303                              pte_t *ptep)
1304 {
1305         PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
1306 }
1307
1308 static inline void pmd_clear(pmd_t *pmdp)
1309 {
1310         PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
1311 }
1312 #else  /* !CONFIG_X86_PAE */
1313 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1314 {
1315         set_pte(ptep, pte);
1316 }
1317
1318 static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1319                                    pte_t *ptep, pte_t pte)
1320 {
1321         set_pte(ptep, pte);
1322 }
1323
1324 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1325                              pte_t *ptep)
1326 {
1327         set_pte_at(mm, addr, ptep, __pte(0));
1328 }
1329
1330 static inline void pmd_clear(pmd_t *pmdp)
1331 {
1332         set_pmd(pmdp, __pmd(0));
1333 }
1334 #endif  /* CONFIG_X86_PAE */
1335
1336 /* Lazy mode for batching updates / context switch */
1337 enum paravirt_lazy_mode {
1338         PARAVIRT_LAZY_NONE,
1339         PARAVIRT_LAZY_MMU,
1340         PARAVIRT_LAZY_CPU,
1341 };
1342
1343 enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1344 void paravirt_enter_lazy_cpu(void);
1345 void paravirt_leave_lazy_cpu(void);
1346 void paravirt_enter_lazy_mmu(void);
1347 void paravirt_leave_lazy_mmu(void);
1348 void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
1349
1350 #define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
1351 static inline void arch_enter_lazy_cpu_mode(void)
1352 {
1353         PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
1354 }
1355
1356 static inline void arch_leave_lazy_cpu_mode(void)
1357 {
1358         PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
1359 }
1360
1361 static inline void arch_flush_lazy_cpu_mode(void)
1362 {
1363         if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
1364                 arch_leave_lazy_cpu_mode();
1365                 arch_enter_lazy_cpu_mode();
1366         }
1367 }
1368
1369
1370 #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1371 static inline void arch_enter_lazy_mmu_mode(void)
1372 {
1373         PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
1374 }
1375
1376 static inline void arch_leave_lazy_mmu_mode(void)
1377 {
1378         PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
1379 }
1380
1381 static inline void arch_flush_lazy_mmu_mode(void)
1382 {
1383         if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
1384                 arch_leave_lazy_mmu_mode();
1385                 arch_enter_lazy_mmu_mode();
1386         }
1387 }
1388
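/*
 * Usage sketch (illustrative): bracketing a run of pagetable updates
 * in lazy MMU mode lets a hypervisor backend queue the set_pte_at()
 * calls and flush them in one batch on leave.  pte_wrprotect() is
 * assumed from <asm/pgtable.h>; the loop itself is hypothetical.
 */
static inline void example_wrprotect_range(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep, unsigned nr)
{
        unsigned i;

        arch_enter_lazy_mmu_mode();
        for (i = 0; i < nr; i++)
                set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i,
                           pte_wrprotect(ptep[i]));
        arch_leave_lazy_mmu_mode();
}
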
1389 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1390                                 unsigned long phys, pgprot_t flags)
1391 {
1392         pv_mmu_ops.set_fixmap(idx, phys, flags);
1393 }
1394
1395 void _paravirt_nop(void);
1396 #define paravirt_nop    ((void *)_paravirt_nop)
1397
1398 void paravirt_use_bytelocks(void);
1399
1400 #ifdef CONFIG_SMP
1401
1402 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1403 {
1404         return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1405 }
1406
1407 static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1408 {
1409         return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1410 }
1411
1412 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1413 {
1414         PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1415 }
1416
1417 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1418 {
1419         return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1420 }
1421
1422 static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1423 {
1424         PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1425 }
1426
1427 #endif
1428
1429 /* These all sit in the .parainstructions section to tell us what to patch. */
1430 struct paravirt_patch_site {
1431         u8 *instr;              /* original instructions */
1432         u8 instrtype;           /* type of this instruction */
1433         u8 len;                 /* length of original instruction */
1434         u16 clobbers;           /* what registers you may clobber */
1435 };
1436
1437 extern struct paravirt_patch_site __parainstructions[],
1438         __parainstructions_end[];
1439
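/*
 * Simplified sketch of how these records are consumed.  The real loop
 * is apply_paravirt() in arch/x86/kernel/alternative.c, which also
 * copies each site into a scratch buffer and nop-pads whatever the
 * patch function did not fill; that bookkeeping is elided here.
 */
static inline void example_walk_patch_sites(void)
{
        struct paravirt_patch_site *p;

        for (p = __parainstructions; p < __parainstructions_end; p++)
                pv_init_ops.patch(p->instrtype, p->clobbers, p->instr,
                                  (unsigned long)p->instr, p->len);
}
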
1440 #ifdef CONFIG_X86_32
1441 #define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
1442 #define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
1443 #define PV_FLAGS_ARG "0"
1444 #define PV_EXTRA_CLOBBERS
1445 #define PV_VEXTRA_CLOBBERS
1446 #else
1447 /* We save some registers, but saving all of them would be too much.  We
1448  * clobber all caller-saved registers except the argument parameter. */
1449 #define PV_SAVE_REGS "pushq %%rdi;"
1450 #define PV_RESTORE_REGS "popq %%rdi;"
1451 #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
1452 #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
1453 #define PV_FLAGS_ARG "D"
1454 #endif
1455
1456 static inline unsigned long __raw_local_save_flags(void)
1457 {
1458         unsigned long f;
1459
1460         asm volatile(paravirt_alt(PV_SAVE_REGS
1461                                   PARAVIRT_CALL
1462                                   PV_RESTORE_REGS)
1463                      : "=a"(f)
1464                      : paravirt_type(pv_irq_ops.save_fl),
1465                        paravirt_clobber(CLBR_EAX)
1466                      : "memory", "cc" PV_VEXTRA_CLOBBERS);
1467         return f;
1468 }
1469
1470 static inline void raw_local_irq_restore(unsigned long f)
1471 {
1472         asm volatile(paravirt_alt(PV_SAVE_REGS
1473                                   PARAVIRT_CALL
1474                                   PV_RESTORE_REGS)
1475                      : "=a"(f)
1476                      : PV_FLAGS_ARG(f),
1477                        paravirt_type(pv_irq_ops.restore_fl),
1478                        paravirt_clobber(CLBR_EAX)
1479                      : "memory", "cc" PV_EXTRA_CLOBBERS);
1480 }
1481
1482 static inline void raw_local_irq_disable(void)
1483 {
1484         asm volatile(paravirt_alt(PV_SAVE_REGS
1485                                   PARAVIRT_CALL
1486                                   PV_RESTORE_REGS)
1487                      :
1488                      : paravirt_type(pv_irq_ops.irq_disable),
1489                        paravirt_clobber(CLBR_EAX)
1490                      : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
1491 }
1492
1493 static inline void raw_local_irq_enable(void)
1494 {
1495         asm volatile(paravirt_alt(PV_SAVE_REGS
1496                                   PARAVIRT_CALL
1497                                   PV_RESTORE_REGS)
1498                      :
1499                      : paravirt_type(pv_irq_ops.irq_enable),
1500                        paravirt_clobber(CLBR_EAX)
1501                      : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
1502 }
1503
1504 static inline unsigned long __raw_local_irq_save(void)
1505 {
1506         unsigned long f;
1507
1508         f = __raw_local_save_flags();
1509         raw_local_irq_disable();
1510         return f;
1511 }
1512
1513
1514 /* Make sure as little as possible of this mess escapes. */
1515 #undef PARAVIRT_CALL
1516 #undef __PVOP_CALL
1517 #undef __PVOP_VCALL
1518 #undef PVOP_VCALL0
1519 #undef PVOP_CALL0
1520 #undef PVOP_VCALL1
1521 #undef PVOP_CALL1
1522 #undef PVOP_VCALL2
1523 #undef PVOP_CALL2
1524 #undef PVOP_VCALL3
1525 #undef PVOP_CALL3
1526 #undef PVOP_VCALL4
1527 #undef PVOP_CALL4
1528
1529 #else  /* __ASSEMBLY__ */
1530
1531 #define _PVSITE(ptype, clobbers, ops, word, algn)       \
1532 771:;                                           \
1533         ops;                                    \
1534 772:;                                           \
1535         .pushsection .parainstructions,"a";     \
1536          .align algn;                           \
1537          word 771b;                             \
1538          .byte ptype;                           \
1539          .byte 772b-771b;                       \
1540          .short clobbers;                       \
1541         .popsection
1542
1543
1544 #ifdef CONFIG_X86_64
1545 #define PV_SAVE_REGS                            \
1546         push %rax;                              \
1547         push %rcx;                              \
1548         push %rdx;                              \
1549         push %rsi;                              \
1550         push %rdi;                              \
1551         push %r8;                               \
1552         push %r9;                               \
1553         push %r10;                              \
1554         push %r11
1555 #define PV_RESTORE_REGS                         \
1556         pop %r11;                               \
1557         pop %r10;                               \
1558         pop %r9;                                \
1559         pop %r8;                                \
1560         pop %rdi;                               \
1561         pop %rsi;                               \
1562         pop %rdx;                               \
1563         pop %rcx;                               \
1564         pop %rax
1565 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
1566 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1567 #define PARA_INDIRECT(addr)     *addr(%rip)
1568 #else
1569 #define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
1570 #define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
1571 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
1572 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1573 #define PARA_INDIRECT(addr)     *%cs:addr
1574 #endif
1575
1576 #define INTERRUPT_RETURN                                                \
1577         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
1578                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1579
1580 #define DISABLE_INTERRUPTS(clobbers)                                    \
1581         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1582                   PV_SAVE_REGS;                                         \
1583                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
1584                   PV_RESTORE_REGS;)
1585
1586 #define ENABLE_INTERRUPTS(clobbers)                                     \
1587         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
1588                   PV_SAVE_REGS;                                         \
1589                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
1590                   PV_RESTORE_REGS;)
1591
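/*
 * Usage sketch: assembly code (e.g. the entry_*.S files) invokes
 * these macros directly to create patchable interrupt-flag sites:
 *
 *      DISABLE_INTERRUPTS(CLBR_ANY)
 *      ...critical section...
 *      ENABLE_INTERRUPTS(CLBR_ANY)
 */
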
1592 #define USERGS_SYSRET32                                                 \
1593         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
1594                   CLBR_NONE,                                            \
1595                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
1596
1597 #ifdef CONFIG_X86_32
1598 #define GET_CR0_INTO_EAX                                \
1599         push %ecx; push %edx;                           \
1600         call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1601         pop %edx; pop %ecx
1602
1603 #define ENABLE_INTERRUPTS_SYSEXIT                                       \
1604         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
1605                   CLBR_NONE,                                            \
1606                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1607
1608
1609 #else   /* !CONFIG_X86_32 */
1610
1611 /*
1612  * If swapgs is used while the userspace stack is still current,
1613  * there's no way to call a pvop.  The PV replacement *must* be
1614  * inlined, or the swapgs instruction must be trapped and emulated.
1615  */
1616 #define SWAPGS_UNSAFE_STACK                                             \
1617         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
1618                   swapgs)
1619
1620 #define SWAPGS                                                          \
1621         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
1622                   PV_SAVE_REGS;                                         \
1623                   call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
1624                   PV_RESTORE_REGS                                       \
1625                  )
1626
1627 #define GET_CR2_INTO_RCX                                \
1628         call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1629         movq %rax, %rcx;                                \
1630         xorq %rax, %rax;
1631
1632 #define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
1633         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1634                   CLBR_NONE,                                            \
1635                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1636
1637 #define USERGS_SYSRET64                                                 \
1638         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
1639                   CLBR_NONE,                                            \
1640                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1641
1642 #define ENABLE_INTERRUPTS_SYSEXIT32                                     \
1643         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
1644                   CLBR_NONE,                                            \
1645                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1646 #endif  /* CONFIG_X86_32 */
1647
1648 #endif /* __ASSEMBLY__ */
1649 #endif /* CONFIG_PARAVIRT */
1650 #endif  /* __ASM_PARAVIRT_H */