/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters. It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here to point the hardware to the
 * actual Guest pages when running the Guest. :*/
/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"
/*M:008 We hold references to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/
/*H:300 We use two-level page tables for the Guest. If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review lguest.c's "Page Table Handling" (with diagrams!);
 * there's also a small worked example of the address split just below.
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables. Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's. (See what I mean about weird naming? Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code. There are seven
 * parts to this:
 *  (i) Setting up a page table entry for the Guest when it faults,
 *  (ii) Setting up the page table entry for the Guest stack,
 *  (iii) Setting up a page table entry when the Guest tells us it has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
 :*/
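/* (A purely illustrative sketch, not used by lguest itself: on a two-level
 * i386 build with 4k pages, a 32-bit virtual address splits into a 10-bit
 * PGD index, a 10-bit PTE index and a 12-bit offset within the page. This
 * hypothetical helper shows the arithmetic; the real code uses pgd_index()
 * and friends.) */
static inline void example_split_vaddr(unsigned long vaddr,
				       unsigned int *pgd_idx,
				       unsigned int *pte_idx,
				       unsigned int *off)
{
	*pgd_idx = vaddr >> 22;            /* top 10 bits: which PGD entry */
	*pte_idx = (vaddr >> 12) & 0x3FF;  /* next 10 bits: which PTE entry */
	*off = vaddr & 0xFFF;              /* bottom 12 bits: byte in page */
}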
/* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page. */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
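/* (Concretely, assuming a two-level build where PTRS_PER_PGD is 1024: this
 * is entry 1023, which covers virtual addresses 0xFFC00000-0xFFFFFFFF, the
 * top 4MB where the Switcher lives.) */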
/* We actually need a separate PTE page for each CPU. Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
/*H:320 With our shadow and Guest types established, we need to deal with
 * them: the page table code is curly enough to need helper functions to keep
 * it clear and clean.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry for that address. Since we keep track of several page
 * tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
{
	unsigned int index = pgd_index(vaddr);

	/* We kill any Guest trying to touch the Switcher addresses. */
	if (index >= SWITCHER_PGD_INDEX) {
		kill_guest(lg, "attempt to access switcher pages");
		index = 0;
	}
	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &lg->pgdirs[i].pgdir[index];
}
/* This routine then takes the PGD entry given above, which contains the
 * address of the PTE page. It then returns a pointer to the PTE entry for the
 * given virtual address. */
static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr)
{
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
}
/* These two functions are just like the above two, except they access the
 * Guest page tables. Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
{
	unsigned int index = vaddr >> PGDIR_SHIFT;
	return lg->pgdirs[lg->pgdidx].gpgdir + index * sizeof(pgd_t);
}

static unsigned long gpte_addr(struct lguest *lg,
			       pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + ((vaddr >> PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
}
/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number. It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to
 * put that back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;
	/* This value indicates failure. */
	unsigned long ret = -1UL;

	/* get_user_pages() is a complex interface: it gets the "struct
	 * vm_area_struct" and "struct page" associated with a range of pages.
	 * It also needs the task's mmap_sem held, and is not very quick.
	 * It returns the number of pages it got. */
	down_read(&current->mm->mmap_sem);
	if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
			   1, write, 1, &page, NULL) == 1)
		ret = page_to_pfn(page);
	up_read(&current->mm->mmap_sem);
	return ret;
}
/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky. The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/* The Guest sets the global flag, because it thinks that it is using
	 * PGE. We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping. We don't actually
	 * use the global bit, so throw it away. */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)lg->mem_base / PAGE_SIZE;

	/* We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn. get_pfn() finds the real physical number of the
	 * page, given the virtual number. */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(lg, "failed to get page %lu", pte_pfn(gpte));
		/* When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them. Make sure we don't think
		 * this one is valid! */
		flags = 0;
	}
	/* Now we assemble our shadow PTE from the page number and flags. */
	return pfn_pte(pfn, __pgprot(flags));
}
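/* (A worked example, with made-up numbers: if the Guest's memory starts at
 * Launcher address 0x8000000, base is 0x8000, so a Guest PTE naming page 5
 * asks get_pfn() for the Launcher's virtual page 0x8005, and the physical
 * page number it returns is what goes into the shadow PTE.) */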
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
	/* Remember that get_user_pages() took a reference to the page, in
	 * get_pfn()? We have to put it back now. */
	if (pte_flags(pte) & _PAGE_PRESENT)
		put_page(pfn_to_page(pte_pfn(pte)));
}
static void check_gpte(struct lguest *lg, pte_t gpte)
{
	if ((pte_flags(gpte) & (_PAGE_PWT|_PAGE_PSE))
	    || pte_pfn(gpte) >= lg->pfn_limit)
		kill_guest(lg, "bad page table entry");
}

static void check_gpgd(struct lguest *lg, pgd_t gpgd)
{
	if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || pgd_pfn(gpgd) >= lg->pfn_limit)
		kill_guest(lg, "bad page directory entry");
}
/*H:330
 * (i) Setting up a page table entry for the Guest when it faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here. That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true. Otherwise it was a real fault and we need to tell the Guest. */
int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
{
	pgd_t gpgd;
	pgd_t *spgd;
	unsigned long gpte_ptr;
	pte_t gpte;
	pte_t *spte;

	/* First step: get the top-level Guest page table entry. */
	gpgd = __pgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
	/* Toplevel not present? We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
		return 0;

	/* Now look at the matching shadow entry. */
	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
		/* This is not really the Guest's fault, but killing it is
		 * simple for this corner case. */
		if (!ptepage) {
			kill_guest(lg, "out of memory allocating pte page");
			return 0;
		}
		/* We check that the Guest pgd is OK. */
		check_gpgd(lg, gpgd);
		/* And we copy the flags to the shadow PGD entry. The page
		 * number in the shadow PGD is the page we just allocated. */
		*spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
	}

	/* OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later. */
	gpte_ptr = gpte_addr(lg, gpgd, vaddr);
	gpte = __pte(lgread_u32(lg, gpte_ptr));

	/* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		return 0;

	/* Check they're not trying to write to a page the Guest wants
	 * read-only (errcode & 2 means the fault was a write). */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return 0;

	/* User access to a kernel page? (errcode & 4 means a userspace
	 * access) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return 0;

	/* Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary). */
	check_gpte(lg, gpte);
	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
	gpte = pte_mkyoung(gpte);
	if (errcode & 2)
		gpte = pte_mkdirty(gpte);

	/* Get the pointer to the shadow PTE entry we're going to set. */
	spte = spte_addr(lg, *spgd, vaddr);
	/* If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry. */
	release_pte(*spte);

	/* If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()). */
	if (pte_dirty(gpte))
		*spte = gpte_to_spte(lg, gpte, 1);
	else
		/* If this is a read, don't set the "writable" bit in the page
		 * table entry, even if the Guest says it's writable. That way
		 * we come back here when a write does actually occur, so we
		 * can update the Guest's _PAGE_DIRTY flag. */
		*spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0);

	/* Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
	lgwrite_u32(lg, gpte_ptr, pte_val(gpte));

	/* We succeeded in mapping the page! */
	return 1;
}
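/* (A hypothetical caller sketch, purely illustrative: run_guest() services a
 * Guest page fault roughly like this, reflecting the fault into the Guest as
 * trap 14 only when we couldn't fix it up ourselves.) */
static inline void example_handle_fault(struct lguest *lg,
					unsigned long cr2, int errcode)
{
	if (!demand_page(lg, cr2, errcode))
		deliver_trap(lg, 14);	/* a real fault: tell the Guest */
}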
/*H:360 (ii) Setting up the page table entry for the Guest stack.
 *
 * Remember pin_stack_pages() which makes sure the stack is mapped? It could
 * simply call demand_page(), but as we've seen that logic is quite long, and
 * usually the stack pages are already mapped anyway, so it's not required.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static int page_writable(struct lguest *lg, unsigned long vaddr)
{
	pgd_t *spgd;
	unsigned long flags;

	/* Look at the top level entry: is it present? */
	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
		return 0;

	/* Check the flags on the pte entry itself: it must be present and
	 * writable. */
	flags = pte_flags(*(spte_addr(lg, *spgd, vaddr)));

	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}
/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lguest *lg, unsigned long vaddr)
{
	if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
		kill_guest(lg, "bad stack page %#lx", vaddr);
}
/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		/* Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one). */
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		*spgd = __pgd(0);
	}
}
/*H:440 (v) Flushing (throwing away) page tables.
 *
 * We saw flush_user_mappings() called when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < pgd_index(lg->page_offset); i++)
		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}
/* The Guest also has a hypercall to do this manually: it's used when a large
 * number of mappings have been changed. */
void guest_pagetable_flush_user(struct lguest *lg)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(lg, lg->pgdidx);
}
/* We keep several page tables. This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. If none matches, we return ARRAY_SIZE(lg->pgdirs): callers check for
 * that. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].gpgdir == pgtable)
			break;
	return i;
}
/*H:435 And this is us, creating the new page directory. If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lguest *lg,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;

	/* We pick one entry at random to throw out. Choosing the Least
	 * Recently Used might be better, but this is easy. */
	next = random32() % ARRAY_SIZE(lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!lg->pgdirs[next].pgdir) {
		lg->pgdirs[next].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!lg->pgdirs[next].pgdir)
			next = lg->pgdidx;
		else
			/* This is a blank page, so there are no kernel
			 * mappings: caller must map the stack! */
			*blank_pgdir = 1;
	}
	/* Record which Guest toplevel this shadows. */
	lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(lg, next);

	return next;
}
/*H:430 (iv) Switching page tables
 *
 * This is what happens when the Guest changes page tables (ie. changes the
 * top-level pgdir). This happens on almost every context switch. */
void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/* Look to see if we have this one already. */
	newpgdir = find_pgdir(lg, pgtable);
	/* If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1. */
	if (newpgdir == ARRAY_SIZE(lg->pgdirs))
		newpgdir = new_pgdir(lg, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	lg->pgdidx = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(lg);
}
/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables. This is used when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir)
			/* Every PGD entry except the Switcher at the top */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg, lg->pgdirs[i].pgdir + j);
}
/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping. Since kernel mappings are in every page table, it's easiest to
 * throw them all away. This is amazingly slow, but thankfully rare. */
void guest_pagetable_clear_all(struct lguest *lg)
{
	release_all_pagetables(lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(lg);
}
/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in. We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway. This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately. */
static void do_set_pte(struct lguest *lg, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(lg, idx, vaddr);

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		/* Otherwise, we start by releasing the existing entry. */
		pte_t *spte = spte_addr(lg, *spgd, vaddr);
		release_pte(*spte);

		/* If they're setting this entry as dirty or accessed, we might
		 * as well put that entry they've given us in now. This shaves
		 * 10% off a copy-on-write micro-benchmark. */
		if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
			check_gpte(lg, gpte);
			*spte = gpte_to_spte(lg, gpte,
					     pte_flags(gpte) & _PAGE_DIRTY);
		} else
			/* Otherwise we can demand_page() it in later. */
			*spte = __pte(0);
	}
}
/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few). Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes. So when the page table above that address changes, we update
 * all the page tables, not just the current one. This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings. This speeds up context switch immensely. */
void guest_set_pte(struct lguest *lg,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/* Kernel mappings must be changed on all top levels. Slow, but
	 * doesn't happen often. */
	if (vaddr >= lg->page_offset) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
			if (lg->pgdirs[i].pgdir)
				do_set_pte(lg, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(lg, gpgdir);
		if (pgdir != ARRAY_SIZE(lg->pgdirs))
			/* If so, do the update. */
			do_set_pte(lg, pgdir, vaddr, gpte);
	}
}
/*H:400
 * (iii) Setting up a page table entry when the Guest tells us it has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed. When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry: */
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
	int pgdir;

	/* The kernel seems to try to initialize this early on: we ignore its
	 * attempts to map over the Switcher. */
	if (idx >= SWITCHER_PGD_INDEX)
		return;

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		/* ... throw it away. */
		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}
/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is. We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
	/* In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->page_offset)". This assumes it won't hit
	 * the Switcher mappings, so check that now. */
	if (pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
		return -EINVAL;
	/* We start on the first shadow page table, and give it a blank PGD
	 * page. */
	lg->pgdidx = 0;
	lg->pgdirs[lg->pgdidx].gpgdir = pgtable;
	lg->pgdirs[lg->pgdidx].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	if (!lg->pgdirs[lg->pgdidx].pgdir)
		return -ENOMEM;
	return 0;
}
/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}
/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be available to the
 * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in. */
void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
{
	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
	pgd_t switcher_pgd;
	pte_t regs_pte;

	/* Make the last PGD entry for this Guest point to the Switcher's PTE
	 * page for this CPU (with appropriate flags). */
	switcher_pgd = __pgd(__pa(switcher_pte_page) | _PAGE_KERNEL);

	lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

	/* We also change the Switcher PTE page. When we're running the Guest,
	 * we want the Guest's "regs" page to appear where the first Switcher
	 * page for this CPU is. This is an optimization: when the Switcher
	 * saves the Guest registers, it saves them into the first page of this
	 * CPU's "struct lguest_pages": if we make sure the Guest's register
	 * page is already mapped there, we don't have to copy them out
	 * again. */
	regs_pte = pfn_pte(__pa(lg->regs_page) >> PAGE_SHIFT,
			   __pgprot(_PAGE_KERNEL));
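	/* The Switcher PTE page covers the top 4MB, so dividing the Switcher
	 * virtual address of "pages" by PAGE_SIZE (mod PTRS_PER_PTE) picks
	 * out the slot mapping this CPU's first Switcher page. */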
	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
}
static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}
/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	pte_t *pte = switcher_pte_page(cpu);

	/* The first entries are easy: they map the Switcher code. */
	for (i = 0; i < pages; i++) {
		pte[i] = mk_pte(switcher_page[i],
				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
	}

	/* The only other thing we map is this CPU's pair of pages. */
	i = pages + cpu*2;

	/* First page (Guest registers) is writable from the Guest */
	pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));
	/* The second page contains the "struct lguest_ro_state", and is
	 * read-only for the Guest. */
	pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
}
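/* (Illustrative layout of the resulting PTE page, assuming pages == 1 and
 * two CPUs:
 *   pte[0]          the Switcher code (read-only)
 *   pte[1], pte[2]  CPU 0's regs page (RW) and "lguest_ro_state" page (RO)
 *   pte[3], pte[4]  the same pair for CPU 1
 * and so on for each possible CPU.) */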
/*H:510 At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!switcher_pte_page(i)) {
			free_switcher_pte_pages();
			return -ENOMEM;
		}
		populate_switcher_pte_page(i, switcher_page, pages);
	}
	return 0;
}
/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
	free_switcher_pte_pages();
}