1 /******************************************************************************
2 * arch/ia64/xen/xen_pv_ops.c
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/console.h>
24 #include <linux/irq.h>
25 #include <linux/kernel.h>
27 #include <linux/unistd.h>
29 #include <asm/xen/hypervisor.h>
30 #include <asm/xen/xencomm.h>
31 #include <asm/xen/privop.h>
36 /***************************************************************************
/*
 * pv_info instance for a Xen PV domain: records that paravirt is active
 * and which privilege level (rpl) the kernel actually runs at.
 *
 * NOTE(review): this chunk appears decimated by extraction -- the closing
 * "};" of the initializer, the xen_info_init() function header, and other
 * lines are missing. Only comments have been added here; do not treat the
 * fragment as compilable.
 */
39 static struct pv_info xen_info __initdata = {
40 .kernel_rpl = 2, /* or 1: determin at runtime */
41 .paravirt_enabled = 1,
/* Field/mask to extract the privilege level (pl) from ar.rsc bits [3:2]. */
45 #define IA64_RSC_PL_SHIFT 2
46 #define IA64_RSC_PL_BIT_SIZE 2
47 #define IA64_RSC_PL_MASK \
48 (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)
/* Body of the (header-missing) init routine: read ar.rsc and record the
 * actual rpl, since Xenified ia64 may run at pl 1 or 2. */
53 /* Xenified Linux/ia64 may run on pl = 1 or 2.
54 * determin at run time. */
55 unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
56 unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
57 xen_info.kernel_rpl = rpl;
60 /***************************************************************************
62 * initialization hooks.
/*
 * Panic path: save the kernel stack pointer from the unwind frame, then
 * tell the hypervisor to crash this domain. Never returns in practice.
 * NOTE(review): return-type lines, braces and return statements are
 * missing from this fragment (extraction damage).
 */
66 xen_panic_hypercall(struct unw_frame_info *info, void *arg)
68 current->thread.ksp = (__u64)info->sw - 16;
69 HYPERVISOR_shutdown(SHUTDOWN_crash);
70 /* we're never actually going to get here... */
/* Notifier callback: run xen_panic_hypercall with a live unwind frame. */
74 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
76 unw_init_running(xen_panic_hypercall, NULL);
77 /* we're never actually going to get here... */
81 static struct notifier_block xen_panic_block = {
82 xen_panic_event, NULL, 0 /* try to go last */
/* Power-off hook: ask the hypervisor to power the domain off. */
85 static void xen_pm_power_off(void)
88 HYPERVISOR_shutdown(SHUTDOWN_poweroff);
/* Fragment of xen_banner(): printk arguments announcing the Xen domain
 * (header, pl argument and format tail are missing here). */
95 "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
98 HYPERVISOR_shared_info->arch.start_info_pfn,
99 xen_start_info->nr_pages, xen_start_info->flags);
/* Reserve the page holding the Xen start_info so the kernel keeps off it. */
103 xen_reserve_memory(struct rsvd_region *region)
105 region->start = (unsigned long)__va(
106 (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
107 region->end = region->start + PAGE_SIZE;
/*
 * Early Xen setup: locate start_info via the shared page, initialize
 * xencomm (required before any hypercall), query features, register the
 * panic notifier and power-off hook, and enable optional Xen features.
 * NOTE(review): the function's return-type line, braces, and the second
 * argument of atomic_notifier_chain_register (presumably
 * &xen_panic_block) are missing from this fragment.
 */
112 xen_arch_setup_early(void)
114 struct shared_info *s;
115 BUG_ON(!xen_pv_domain());
117 s = HYPERVISOR_shared_info;
118 xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
120 /* Must be done before any hypercall. */
121 xencomm_initialize();
123 xen_setup_features();
124 /* Register a call for panic conditions. */
125 atomic_notifier_chain_register(&panic_notifier_list,
127 pm_power_off = xen_pm_power_off;
129 xen_ia64_enable_opt_feature();
/* Console setup: prefer xenboot/tty/hvc consoles for a PV domain. */
133 xen_arch_setup_console(char **cmdline_p)
135 add_preferred_console("xenboot", 0, NULL);
136 add_preferred_console("tty", 0, NULL);
138 add_preferred_console("hvc", 0, NULL);
140 #if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
/* MCA is not used under Xen; body (likely "return 1;") is missing. */
146 xen_arch_setup_nomca(void)
/* After boot CPU SMP prep: place vcpu_info for fast access. */
152 xen_post_smp_prepare_boot_cpu(void)
154 xen_setup_vcpu_info_placement();
/* Forward declarations for the binary-patching entry points defined
 * later in this file. */
158 static unsigned long __init_or_module
159 xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
162 xen_patch_branch(unsigned long tag, unsigned long type);
/*
 * pv_init_ops vector: wires the Xen setup/patch hooks defined above
 * into the generic ia64 paravirt init path.
 * NOTE(review): closing "};" is missing (extraction damage).
 */
164 static const struct pv_init_ops xen_init_ops __initconst = {
165 .banner = xen_banner,
167 .reserve_memory = xen_reserve_memory,
169 .arch_setup_early = xen_arch_setup_early,
170 .arch_setup_console = xen_arch_setup_console,
171 .arch_setup_nomca = xen_arch_setup_nomca,
173 .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
175 .patch_bundle = xen_patch_bundle,
177 .patch_branch = xen_patch_branch,
/* Fast-syscall data: Xen-specific fsyscall table and bubble-down entry,
 * both provided by xen assembly (xenivt/xen gate code). */
185 extern unsigned long xen_fsyscall_table[NR_syscalls];
186 extern char xen_fsys_bubble_down[];
187 struct pv_fsys_data xen_fsys_data __initdata = {
188 .fsyscall_table = (unsigned long *)xen_fsyscall_table,
189 .fsys_bubble_down = (void *)xen_fsys_bubble_down,
192 /***************************************************************************
194 * patchdata addresses
/*
 * Gate-page patch lists: DECLARE() brings in the linker-provided
 * start/end symbols for each Xen gate patchlist section; ASSIGN() fills
 * the corresponding pv_patchdata fields with their addresses.
 * NOTE(review): #undef lines and the struct's closing "};" appear to be
 * missing from this fragment.
 */
197 #define DECLARE(name) \
198 extern unsigned long __xen_start_gate_##name##_patchlist[]; \
199 extern unsigned long __xen_end_gate_##name##_patchlist[]
202 DECLARE(brl_fsys_bubble_down);
204 DECLARE(mckinley_e9);
206 extern unsigned long __xen_start_gate_section[];
208 #define ASSIGN(name) \
209 .start_##name##_patchlist = \
210 (unsigned long)__xen_start_gate_##name##_patchlist, \
211 .end_##name##_patchlist = \
212 (unsigned long)__xen_end_gate_##name##_patchlist
214 static struct pv_patchdata xen_patchdata __initdata = {
216 ASSIGN(brl_fsys_bubble_down),
220 .gate_section = (void*)__xen_start_gate_section,
223 /***************************************************************************
/*
 * C fallbacks (compiled only when inline-asm stubs are unavailable) for
 * the Xen-virtualized interval timer: the guest's view of ar.itc is the
 * native counter plus a per-vcpu itc_offset kept in XEN_MAPPEDREGS,
 * with itc_last enforcing monotonicity across reads.
 * NOTE(review): several lines (function headers, braces, the cmpxchg
 * arguments, #endif) are missing from this fragment.
 */
228 #ifndef ASM_SUPPORTED
230 xen_set_itm_with_offset(unsigned long val)
232 /* ia64_cpu_local_tick() calls this with interrupt enabled. */
233 /* WARN_ON(!irqs_disabled()); */
234 xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
/* Read cr.itm and convert back to guest time by adding itc_offset. */
238 xen_get_itm_with_offset(void)
240 /* unused at this moment */
241 printk(KERN_DEBUG "%s is called.\n", __func__);
243 WARN_ON(!irqs_disabled());
244 return ia64_native_getreg(_IA64_REG_CR_ITM) +
245 XEN_MAPPEDREGS->itc_offset;
248 /* ia64_set_itc() is only called by
249 * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
250 * So XEN_MAPPEDRESG->itc_offset cal be considered as almost constant.
253 xen_set_itc(unsigned long val)
257 WARN_ON(!irqs_disabled());
258 mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
259 XEN_MAPPEDREGS->itc_offset = val - mitc;
260 XEN_MAPPEDREGS->itc_last = val;
/* xen_get_itc() body: read native ar.itc, add the offset, and bump past
 * itc_last via cmpxchg so successive reads never go backwards. */
267 unsigned long itc_offset;
268 unsigned long itc_last;
269 unsigned long ret_itc_last;
271 itc_offset = XEN_MAPPEDREGS->itc_offset;
273 itc_last = XEN_MAPPEDREGS->itc_last;
274 res = ia64_native_getreg(_IA64_REG_AR_ITC);
278 ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
280 } while (unlikely(ret_itc_last != itc_last));
/* Non-SMP(?) variant: update itc_last without cmpxchg. NOTE(review):
 * the surrounding #ifdef and function boundaries are not visible here. */
284 /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
285 Should it be paravirtualized instead? */
286 WARN_ON(!irqs_disabled());
287 itc_offset = XEN_MAPPEDREGS->itc_offset;
288 itc_last = XEN_MAPPEDREGS->itc_last;
289 res = ia64_native_getreg(_IA64_REG_AR_ITC);
293 XEN_MAPPEDREGS->itc_last = res;
/*
 * C fallback register accessors: dispatch on regnum, forwarding the
 * Xen-virtualized registers (kr0-7, eflag, itc, tpr, itm, eoi, ivr, psr)
 * to hyperprivop-backed helpers, and everything else to the native
 * accessor functions.
 * NOTE(review): switch braces, break statements, and several case bodies
 * are missing from this fragment.
 */
298 static void xen_setreg(int regnum, unsigned long val)
301 case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
302 xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
304 #ifdef CONFIG_IA32_SUPPORT
305 case _IA64_REG_AR_EFLAG:
309 case _IA64_REG_AR_ITC:
312 case _IA64_REG_CR_TPR:
315 case _IA64_REG_CR_ITM:
316 xen_set_itm_with_offset(val);
318 case _IA64_REG_CR_EOI:
/* default: punt to the native setreg implementation. */
322 ia64_native_setreg_func(regnum, val);
327 static unsigned long xen_getreg(int regnum)
335 #ifdef CONFIG_IA32_SUPPORT
336 case _IA64_REG_AR_EFLAG:
337 res = xen_get_eflag();
340 case _IA64_REG_AR_ITC:
343 case _IA64_REG_CR_ITM:
344 res = xen_get_itm_with_offset();
346 case _IA64_REG_CR_IVR:
349 case _IA64_REG_CR_TPR:
/* default: punt to the native getreg implementation. */
353 res = ia64_native_getreg_func(regnum);
/*
 * Virtual psr.i handling: the interrupt-enable bit lives in a
 * memory-mapped byte; enabling must also deliver any pending event.
 */
359 /* turning on interrupts is a bit more complicated.. write to the
360 * memory-mapped virtual psr.i bit first (to avoid race condition),
361 * then if any interrupts were pending, we have to execute a hyperprivop
362 * to ensure the pending interrupt gets delivered; else we're done! */
366 int old = xen_get_virtual_psr_i();
367 xen_set_virtual_psr_i(1);
369 if (!old && xen_get_virtual_pend())
373 /* turning off interrupts can be paravirtualized simply by writing
374 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
378 xen_set_virtual_psr_i(0);
/* Return IA64_PSR_I iff virtual psr.i is set. */
385 return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
/* Restore: enable or disable virtual psr.i per the saved mask. */
389 xen_intrin_local_irq_restore(unsigned long mask)
391 if (mask & IA64_PSR_I)
/*
 * ASM_SUPPORTED path: each paravirt op is emitted as a tiny asm function
 * ("xen_<name>") whose body between _direct_start/_direct_end labels can
 * later be patched inline at call sites. The trailing "br.cond b6"
 * returns through the pv calling convention.
 * NOTE(review): parts of the asm() wrapper (".align", ".global", the
 * asm( opening) are missing from this fragment.
 */
397 #define __DEFINE_FUNC(name, code) \
398 extern const char xen_ ## name ## _direct_start[]; \
399 extern const char xen_ ## name ## _direct_end[]; \
401 ".proc xen_" #name "\n" \
403 "xen_" #name "_direct_start:\n" \
405 "xen_" #name "_direct_end:\n" \
406 "br.cond.sptk.many b6\n" \
407 ".endp xen_" #name "\n")
/* Arity/return-type variants of the stub generator. */
409 #define DEFINE_VOID_FUNC0(name, code) \
411 xen_ ## name (void); \
412 __DEFINE_FUNC(name, code)
414 #define DEFINE_VOID_FUNC1(name, code) \
416 xen_ ## name (unsigned long arg); \
417 __DEFINE_FUNC(name, code)
419 #define DEFINE_VOID_FUNC2(name, code) \
421 xen_ ## name (unsigned long arg0, \
422 unsigned long arg1); \
423 __DEFINE_FUNC(name, code)
425 #define DEFINE_FUNC0(name, code) \
426 extern unsigned long \
427 xen_ ## name (void); \
428 __DEFINE_FUNC(name, code)
430 #define DEFINE_FUNC1(name, type, code) \
431 extern unsigned long \
432 xen_ ## name (type arg); \
433 __DEFINE_FUNC(name, code)
/* Address of the pointer to the virtual-psr.i mask byte in the XSI page. */
435 #define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
/* set_itm_with_offset: r8 = val; convert guest time to native by
 * subtracting itc_offset, then hyperprivop SET_ITM. */
438 * static void xen_set_itm_with_offset(unsigned long val)
439 * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
442 DEFINE_VOID_FUNC1(set_itm_with_offset,
443 "mov r2 = " __stringify(XSI_BASE) " + "
444 __stringify(XSI_ITC_OFFSET_OFS) "\n"
449 "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
/* get_itm_with_offset: native cr.itm plus itc_offset, result in r8. */
452 * static unsigned long xen_get_itm_with_offset(void)
453 * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
456 DEFINE_FUNC0(get_itm_with_offset,
457 "mov r2 = " __stringify(XSI_BASE) " + "
458 __stringify(XSI_ITC_OFFSET_OFS) "\n"
463 "add r8 = r8, r2\n");
/* set_itc stub: store val as itc_last and (val - native ar.itc) as
 * itc_offset in the XSI page. The C-equivalent pseudocode follows.
 * NOTE(review): some asm lines of this stub are missing (extraction
 * damage); do not treat the sequence as complete. */
466 * static void xen_set_itc(unsigned long val)
467 * unsigned long mitc;
469 * WARN_ON(!irqs_disabled());
470 * mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
471 * XEN_MAPPEDREGS->itc_offset = val - mitc;
472 * XEN_MAPPEDREGS->itc_last = val;
475 DEFINE_VOID_FUNC1(set_itc,
476 "mov r2 = " __stringify(XSI_BASE) " + "
477 __stringify(XSI_ITC_LAST_OFS) "\n"
482 __stringify(XSI_ITC_LAST_OFS) " - "
483 __stringify(XSI_ITC_OFFSET_OFS) "\n"
/* get_itc stub: res = ar.itc + itc_offset, clamped to be monotonically
 * increasing past itc_last via cmpxchg8.acq retry loop (label 888). */
488 * static unsigned long xen_get_itc(void)
490 * unsigned long itc_offset;
491 * unsigned long itc_last;
492 * unsigned long ret_itc_last;
494 * itc_offset = XEN_MAPPEDREGS->itc_offset;
496 * itc_last = XEN_MAPPEDREGS->itc_last;
497 * res = ia64_native_getreg(_IA64_REG_AR_ITC);
499 * if (itc_last >= res)
500 * res = itc_last + 1;
501 * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
503 * } while (unlikely(ret_itc_last != itc_last));
507 DEFINE_FUNC0(get_itc,
508 "mov r2 = " __stringify(XSI_BASE) " + "
509 __stringify(XSI_ITC_OFFSET_OFS) "\n"
511 "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
512 __stringify(XSI_ITC_OFFSET_OFS) "\n"
513 /* r9 = itc_offset */
514 /* r2 = XSI_ITC_OFFSET */
516 "mov r8 = ar.itc\n" /* res = ar.itc */
518 "ld8 r3 = [r2]\n" /* r3 = itc_last */
519 "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */
521 "cmp.gtu p6, p0 = r3, r8\n"
523 "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */
527 "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
529 "cmp.ne p6, p0 = r10, r3\n"
531 "(p6) br.cond.spnt 888b\n");
/* fc (flush cache) goes straight to the hypervisor. */
533 DEFINE_VOID_FUNC1(fc,
534 "break " __stringify(HYPERPRIVOP_FC) "\n");
/*
 * intrin_local_irq_restore stub. Pseudocode: look up the virtual psr.i
 * mask byte; if restoring with IA64_PSR_I set, clear the mask (enable)
 * and, if an interrupt was pending while masked, issue SSM_I to deliver
 * it; otherwise set the mask (disable).
 * NOTE(review): several asm lines are missing from this fragment.
 */
537 * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
538 * masked_addr = *psr_i_addr_addr
539 * pending_intr_addr = masked_addr - 1
540 * if (val & IA64_PSR_I) {
541 * masked = *masked_addr
542 * *masked_addr = 0:xen_set_virtual_psr_i(1)
545 * uint8_t pending = *pending_intr_addr;
550 * *masked_addr = 1:xen_set_virtual_psr_i(0)
554 DEFINE_VOID_FUNC1(intrin_local_irq_restore,
555 /* r8 = input value: 0 or IA64_PSR_I
556 * p6 = (flags & IA64_PSR_I)
558 * p7 = !(flags & IA64_PSR_I)
561 "cmp.ne p6, p7 = r8, r0\n"
562 "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
564 /* r9 = XEN_PSR_I_ADDR */
568 /* r10 = masked previous value */
569 "(p6) ld1.acq r10 = [r9]\n"
572 /* p8 = !masked interrupt masked previously? */
573 "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
575 /* p7 = else clause */
579 "(p7) st1.rel [r9] = r11\n"
583 * r9 = masked_addr - 1
584 * = pending_intr_addr
586 "(p8) st1.rel [r9] = r0, -1\n"
588 /* r8 = pending_intr */
589 "(p8) ld1.acq r11 = [r9]\n"
591 /* p9 = interrupt pending? */
592 "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
595 /* issue hypercall to trigger interrupt */
596 "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");
/* Simple hyperprivop-backed TLB/region-register ops. */
598 DEFINE_VOID_FUNC2(ptcga,
599 "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
600 DEFINE_VOID_FUNC2(set_rr,
601 "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
/* get_psr_i stub: r8 = IA64_PSR_I iff virtual psr.i byte is zero
 * (i.e. interrupts not masked). */
604 * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
607 * psr_i = tmp? 0: IA64_PSR_I;
610 DEFINE_FUNC0(get_psr_i,
611 "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
613 "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */
614 "mov r8 = 0\n" /* psr_i = 0 */
616 "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */
618 "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */
620 "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");
/* Remaining single-hyperprivop stubs. */
622 DEFINE_FUNC1(thash, unsigned long,
623 "break " __stringify(HYPERPRIVOP_THASH) "\n");
624 DEFINE_FUNC1(get_cpuid, int,
625 "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
626 DEFINE_FUNC1(get_pmd, int,
627 "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
628 DEFINE_FUNC1(get_rr, unsigned long,
629 "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
/*
 * ssm_i stub: enable virtual psr.i, then check the adjacent pending-
 * interrupt byte; if something was pending while masked, trap to the
 * hypervisor (SSM_I) to deliver it.
 * NOTE(review): some asm lines are missing from this fragment.
 */
632 * void xen_privop_ssm_i(void)
634 * int masked = !xen_get_virtual_psr_i();
635 * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
636 * xen_set_virtual_psr_i(1)
637 * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
638 * // compiler barrier
640 * uint8_t* pend_int_addr =
641 * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
642 * uint8_t pending = *pend_int_addr;
648 DEFINE_VOID_FUNC0(ssm_i,
649 "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
651 "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */
653 "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */
655 "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt
656 * r8 = XEN_PSR_I_ADDR - 1
659 "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
660 * previously interrupt
664 "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */
666 "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/
668 /* issue hypercall to get interrupt */
669 "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
/* rsm_i stub: set the virtual psr.i mask byte to 1 (disable). */
673 * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
674 * = XEN_PSR_I_ADDR_ADDR;
675 * psr_i_addr = *psr_i_addr_addr;
679 DEFINE_VOID_FUNC0(rsm_i,
680 "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
681 /* r8 = XEN_PSR_I_ADDR */
684 "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */
686 "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */
/* set_rr0_to_rr4: five region registers in one hyperprivop. */
689 xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
690 unsigned long val2, unsigned long val3,
692 __DEFINE_FUNC(set_rr0_to_rr4,
693 "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
/*
 * Asm getreg dispatcher: compare the requested regnum (in r8) against
 * each virtualized register; hit -> hyperprivop or branch to a dedicated
 * xen_get_* stub; miss -> fall through to ia64_native_getreg_func via b7.
 * NOTE(review): label lines and some predicate setup are missing here.
 */
696 extern unsigned long xen_getreg(int regnum);
697 #define __DEFINE_GET_REG(id, privop) \
698 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
700 "cmp.eq p6, p0 = r2, r8\n" \
702 "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \
703 "(p6) br.cond.sptk.many b6\n" \
706 __DEFINE_FUNC(getreg,
707 __DEFINE_GET_REG(PSR, PSR)
708 #ifdef CONFIG_IA32_SUPPORT
709 __DEFINE_GET_REG(AR_EFLAG, EFLAG)
/* ar.itc and cr.itm need offset adjustment -> branch to helper stubs. */
713 "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
715 "cmp.eq p6, p0 = r2, r8\n"
717 "(p6) br.cond.spnt xen_get_itc\n"
721 "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
723 "cmp.eq p6, p0 = r2, r8\n"
725 "(p6) br.cond.spnt xen_get_itm_with_offset\n"
728 __DEFINE_GET_REG(CR_IVR, IVR)
729 __DEFINE_GET_REG(CR_TPR, TPR)
/* fallback: tail-jump to the native getreg function pointer. */
732 "movl r2 = ia64_native_getreg_func\n"
736 "br.cond.sptk.many b7\n");
/*
 * Asm setreg dispatcher, same shape: regnum in r9 (kr range handled
 * specially via r17), value per hyperprivop convention, native fallback
 * through b7.
 */
738 extern void xen_setreg(int regnum, unsigned long val);
739 #define __DEFINE_SET_REG(id, privop) \
740 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
742 "cmp.eq p6, p0 = r2, r9\n" \
744 "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \
745 "(p6) br.cond.sptk.many b6\n" \
748 __DEFINE_FUNC(setreg,
751 * if (_IA64_REG_AR_KR0 <= regnum &&
752 * regnum <= _IA64_REG_AR_KR7) {
753 * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
754 * register __val asm ("r9") = val
755 * "break HYPERPRIVOP_SET_KR"
759 "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
761 "cmp.ge p6, p0 = r9, r2\n"
762 "sub r17 = r17, r2\n"
764 "(p6) cmp.ge.unc p7, p0 = "
765 __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
770 "(p7) mov r8 = r17\n"
771 "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
774 "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
776 "cmp.eq p6, p0 = r2, r8\n"
778 "(p6) br.cond.spnt xen_set_itm_with_offset\n"
781 "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
783 "cmp.eq p6, p0 = r2, r8\n"
785 "(p6) br.cond.spnt xen_set_itc\n"
787 #ifdef CONFIG_IA32_SUPPORT
788 __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
790 __DEFINE_SET_REG(CR_TPR, SET_TPR)
791 __DEFINE_SET_REG(CR_EOI, EOI)
/* fallback: tail-jump to the native setreg function pointer. */
794 "movl r2 = ia64_native_setreg_func\n"
798 "br.cond.sptk.many b7\n");
/*
 * pv_cpu_ops vector: wires the xen_* accessors/privops defined above
 * into the generic cpu paravirt interface.
 * NOTE(review): several member lines and the closing "};" are missing
 * from this fragment.
 */
801 static const struct pv_cpu_ops xen_cpu_ops __initconst = {
804 .get_cpuid = xen_get_cpuid,
805 .get_pmd = xen_get_pmd,
806 .getreg = xen_getreg,
807 .setreg = xen_setreg,
809 .get_rr = xen_get_rr,
810 .set_rr = xen_set_rr,
811 .set_rr0_to_rr4 = xen_set_rr0_to_rr4,
814 .get_psr_i = xen_get_psr_i,
815 .intrin_local_irq_restore
816 = xen_intrin_local_irq_restore,
/******************************************************************************
820 * replacement of hand written assembly codes.
/* Entry points provided by Xen-specific assembly (xenentry/xenivt). */
823 extern char xen_switch_to;
824 extern char xen_leave_syscall;
825 extern char xen_work_processed_syscall;
826 extern char xen_leave_kernel;
/* Table consumed by paravirt_cpu_asm_init() to redirect the hand-written
 * assembly paths to their Xen variants. */
828 const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
829 .switch_to = (unsigned long)&xen_switch_to,
830 .leave_syscall = (unsigned long)&xen_leave_syscall,
831 .work_processed_syscall = (unsigned long)&xen_work_processed_syscall,
832 .leave_kernel = (unsigned long)&xen_leave_kernel,
835 /***************************************************************************
837 * iosapic read/write hooks.
/*
 * iosapic hooks: under Xen, IOSAPIC registers are accessed through
 * PHYSDEVOP_apic_read/write hypercalls rather than MMIO; the physbase is
 * recovered by stripping the uncached-offset from the iosapic vaddr.
 * NOTE(review): return types, braces, apic_op.reg assignment, and error
 * handling of the read return value are missing from this fragment.
 */
840 xen_pcat_compat_init(void)
/* Xen decides the irq_chip; body (likely returning NULL) is missing. */
845 static struct irq_chip*
846 xen_iosapic_get_irq_chip(unsigned long trigger)
/* Read an IOSAPIC register via hypercall. */
852 xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
854 struct physdev_apic apic_op;
857 apic_op.apic_physbase = (unsigned long)iosapic -
858 __IA64_UNCACHED_OFFSET;
860 ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
863 return apic_op.value;
/* Write an IOSAPIC register via hypercall. */
867 xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
869 struct physdev_apic apic_op;
871 apic_op.apic_physbase = (unsigned long)iosapic -
872 __IA64_UNCACHED_OFFSET;
875 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
878 static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
879 .pcat_compat_init = xen_pcat_compat_init,
880 .__get_irq_chip = xen_iosapic_get_irq_chip,
882 .__read = xen_iosapic_read,
883 .__write = xen_iosapic_write,
886 /***************************************************************************
887 * pv_ops initialization
/*
 * Install every Xen pv_ops vector (init, fsys, patchdata, cpu, iosapic,
 * irq, time) and switch the hand-written asm entry points to their Xen
 * variants. Called once during early boot.
 * NOTE(review): the return-type line, braces, and a probable
 * xen_info_init() call are missing from this fragment.
 */
891 xen_setup_pv_ops(void)
895 pv_init_ops = xen_init_ops;
896 pv_fsys_data = xen_fsys_data;
897 pv_patchdata = xen_patchdata;
898 pv_cpu_ops = xen_cpu_ops;
899 pv_iosapic_ops = xen_iosapic_ops;
900 pv_irq_ops = xen_irq_ops;
901 pv_time_ops = xen_time_ops;
903 paravirt_cpu_asm_init(&xen_cpu_asm_switch);
907 /***************************************************************************
909 * pv_init_ops.patch_bundle
/*
 * Dedicated single-register direct stubs used as patch-bundle sources:
 * each is one hyperprivop between _direct_start/_direct_end markers.
 * NOTE(review): a few macro body lines are missing from this fragment.
 */
912 #define DEFINE_FUNC_GETREG(name, privop) \
913 DEFINE_FUNC0(get_ ## name, \
914 "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")
916 DEFINE_FUNC_GETREG(psr, PSR);
917 DEFINE_FUNC_GETREG(eflag, EFLAG);
918 DEFINE_FUNC_GETREG(ivr, IVR);
919 DEFINE_FUNC_GETREG(tpr, TPR);
/* set_kr<n>: load the kr index into r8 then SET_KR hyperprivop. */
921 #define DEFINE_FUNC_SET_KR(n) \
922 DEFINE_VOID_FUNC0(set_kr ## n, \
925 "mov r8 = " #n "\n" \
926 "break " __stringify(HYPERPRIVOP_SET_KR) "\n")
928 DEFINE_FUNC_SET_KR(0);
929 DEFINE_FUNC_SET_KR(1);
930 DEFINE_FUNC_SET_KR(2);
931 DEFINE_FUNC_SET_KR(3);
932 DEFINE_FUNC_SET_KR(4);
933 DEFINE_FUNC_SET_KR(5);
934 DEFINE_FUNC_SET_KR(6);
935 DEFINE_FUNC_SET_KR(7);
937 #define __DEFINE_FUNC_SETREG(name, privop) \
938 DEFINE_VOID_FUNC0(name, \
939 "break "__stringify(HYPERPRIVOP_ ## privop) "\n")
941 #define DEFINE_FUNC_SETREG(name, privop) \
942 __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)
944 DEFINE_FUNC_SETREG(eflag, EFLAG);
945 DEFINE_FUNC_SETREG(tpr, TPR);
946 __DEFINE_FUNC_SETREG(eoi, EOI);
/* Markers for the inline-patchable irq-restore sequence below. */
948 extern const char xen_check_events[];
949 extern const char __xen_intrin_local_irq_restore_direct_start[];
950 extern const char __xen_intrin_local_irq_restore_direct_end[];
/* Relocation target recorded so patched copies can re-aim the brl to
 * xen_check_events (see xen_patch_bundle below). */
951 extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;
/*
 * xen_check_events: out-of-line slow path shared by patched irq-restore
 * sites -- clear the pending byte and SSM_I if an interrupt is pending,
 * then return via b6.
 * NOTE(review): the asm( opening, register setup before the labels, and
 * some alignment directives are missing from this fragment.
 */
955 ".proc xen_check_events\n"
956 "xen_check_events:\n"
958 * r9 = masked_addr - 1
959 * = pending_intr_addr
961 "st1.rel [r9] = r0, -1\n"
963 /* r8 = pending_intr */
964 "ld1.acq r11 = [r9]\n"
966 /* p9 = interrupt pending? */
967 "cmp.ne p9, p10 = r11, r0\n"
970 /* issue hypercall to trigger interrupt */
971 "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
972 "br.cond.sptk.many b6\n"
973 ".endp xen_check_events\n"
/*
 * Inline-patchable variant of intrin_local_irq_restore: computes its own
 * return address (ip-relative, labels 1b/1f) so the slow path can come
 * back, and pads with nops so the patched copy matches the 5-bundle pv
 * calling stub. The brl to xen_check_events needs relocation when the
 * bundle is copied to a call site.
 */
976 ".proc __xen_intrin_local_irq_restore_direct\n"
977 "__xen_intrin_local_irq_restore_direct:\n"
978 "__xen_intrin_local_irq_restore_direct_start:\n"
981 "cmp.ne p6, p7 = r8, r0\n"
982 "mov r17 = ip\n" /* get ip to calc return address */
983 "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
987 /* r9 = XEN_PSR_I_ADDR */
990 /* r10 = masked previous value */
991 "(p6) ld1.acq r10 = [r9]\n"
992 "adds r17 = 1f - 1b, r17\n" /* calculate return address */
996 /* p8 = !masked interrupt masked previously? */
997 "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
999 /* p7 = else clause */
1000 "(p7) mov r11 = 1\n"
1002 "(p8) mov b6 = r17\n" /* set return address */
1006 "(p7) st1.rel [r9] = r11\n"
1009 "(p8) brl.cond.dptk.few xen_check_events\n"
1011 /* pv calling stub is 5 bundles. fill nop to adjust return address */
1018 "__xen_intrin_local_irq_restore_direct_end:\n"
1019 ".endp __xen_intrin_local_irq_restore_direct\n"
/* data8 holding the brl's address for relocation (line missing here). */
1022 "__xen_intrin_local_irq_restore_direct_reloc:\n"
/*
 * Table mapping each paravirt patch type to the Xen direct-stub bundle
 * (start/end markers) that should be copied inline at call sites.
 * NOTE(review): the "= {" opener, per-entry braces, and the table's
 * closing "};" are missing from this fragment.
 */
1026 static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
1027 __initdata_or_module =
1029 #define XEN_PATCH_BUNDLE_ELEM(name, type) \
1031 (void*)xen_ ## name ## _direct_start, \
1032 (void*)xen_ ## name ## _direct_end, \
1033 PARAVIRT_PATCH_TYPE_ ## type, \
1036 XEN_PATCH_BUNDLE_ELEM(fc, FC),
1037 XEN_PATCH_BUNDLE_ELEM(thash, THASH),
1038 XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
1039 XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
1040 XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
1041 XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
1042 XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
1043 XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
1044 XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
1045 XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
1046 XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
/* irq-restore uses the hand-rolled direct sequence above. */
1048 (void*)__xen_intrin_local_irq_restore_direct_start,
1049 (void*)__xen_intrin_local_irq_restore_direct_end,
1050 PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
/* Per-register getreg entries: type is GETREG base + register id. */
1053 #define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
1055 xen_get_ ## name ## _direct_start, \
1056 xen_get_ ## name ## _direct_end, \
1057 PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
1060 XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
1061 XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),
1063 XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
1064 XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),
1066 XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
1067 XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),
/* Per-register setreg entries, same scheme. */
1070 #define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
1072 xen_ ## name ## _direct_start, \
1073 xen_ ## name ## _direct_end, \
1074 PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
1077 #define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
1078 __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)
1080 XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
1081 XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
1082 XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
1083 XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
1084 XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
1085 XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
1086 XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
1087 XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),
1089 XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
1090 XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
1091 __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),
1093 XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
1094 XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
/*
 * pv_init_ops.patch_bundle hook: try to patch [sbundle, ebundle) with
 * the matching Xen stub from xen_patch_bundle_elems; if no element
 * matches, fall back to the native patcher. For the irq-restore type,
 * additionally fix up the embedded brl so it targets xen_check_events
 * from the patched location.
 * NOTE(review): braces, the "found"/"used" plumbing, the switch header
 * and the return statement are missing from this fragment.
 */
1097 static unsigned long __init_or_module
1098 xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
1100 const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
1101 sizeof(xen_patch_bundle_elems[0]);
1103 const struct paravirt_patch_bundle_elem *found;
1105 used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
1106 xen_patch_bundle_elems, nelems,
/* no Xen element matched: let the native patcher handle it. */
1111 return ia64_native_patch_bundle(sbundle, ebundle, type);
1117 case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
1118 unsigned long reloc =
1119 __xen_intrin_local_irq_restore_direct_reloc;
1120 unsigned long reloc_offset = reloc - (unsigned long)
1121 __xen_intrin_local_irq_restore_direct_start;
1122 unsigned long tag = (unsigned long)sbundle + reloc_offset;
1123 paravirt_patch_reloc_brl(tag, xen_check_events);
/* NOTE(review): original comment here misspells "ASM_SUPPORTED". */
1132 #endif /* ASM_SUPPOTED */
/*
 * pv_init_ops.patch_branch hook: table of Xen replacements for the
 * hand-written asm entry points, applied by the generic branch patcher.
 * NOTE(review): the table's initializer braces, the macro's entry body,
 * the function's return-type line and braces are missing from this
 * fragment.
 */
1134 const struct paravirt_patch_branch_target xen_branch_target[]
1136 #define PARAVIRT_BR_TARGET(name, type) \
1139 PARAVIRT_PATCH_TYPE_BR_ ## type, \
1141 PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
1142 PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
1143 PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
1144 PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
/* Patch the tagged branch site to the matching Xen target. */
1148 xen_patch_branch(unsigned long tag, unsigned long type)
1150 const unsigned long nelem =
1151 sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
1152 __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);