/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>

#include "44x_tlb.h"
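
/* Guest TLB lookup and shadow (host) TLB maintenance for KVM on the PPC 440:
 * the guest's software-visible TLB is kept in vcpu->arch.guest_tlb[], while
 * the translations actually loaded into hardware live in
 * vcpu->arch.shadow_tlb[]. */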

#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
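
/* Index of the next shadow TLB victim; kvmppc_mmu_map() advances it
 * round-robin through the entries available for guest mappings. */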
static unsigned int kvmppc_tlb_44x_pos;

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* Mask off reserved bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. The U[XWR]
		 * bits sit three bit positions above the S[XWR] bits, hence
		 * the shift. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	return attrib;
}

/* Search the guest TLB for a matching entry. Returns the index of the match,
 * or -1 if no entry matches. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;
		if (eaddr > get_tlb_end(tlbe))
			continue;
		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;
		if (!get_tlb_v(tlbe))
			continue;
		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}
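
/* Instruction and data accesses are translated in the address space selected
 * by MSR[IS] and MSR[DS] respectively, which must match the TS bit of a TLB
 * entry; the i-side and d-side lookups below differ only in which MSR bit
 * they test. */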
struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
	int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;

	return &vcpu->arch.guest_tlb[index];
}

struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
	int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;

	return &vcpu->arch.guest_tlb[index];
}

static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}
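
/* Drop the host page reference held by a valid shadow TLB entry. If the
 * mapping was writable, the guest may have dirtied the page, so it is
 * released with kvm_release_page_dirty() so the host knows its contents
 * changed. */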
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                      unsigned int index)
{
	struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
	struct page *page = vcpu->arch.shadow_pages[index];

	if (get_tlb_v(stlbe)) {
		if (kvmppc_44x_tlbe_is_writable(stlbe))
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
}
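
/* Mark a shadow TLB entry as modified so it is rewritten into the host TLB
 * before the guest next runs. */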
void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
{
	vcpu->arch.shadow_tlb_mod[i] = 1;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
                    u32 flags)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;
	unsigned int victim;
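
	/* Shadow entries are recycled round-robin; tlb_44x_hwater, set by
	 * the host MM code, is the highest TLB index available to us
	 * (entries above it are pinned by the host). */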
	/* Future optimization: don't overwrite the TLB entry containing the
	 * current PC (or stack?). */
	victim = kvmppc_tlb_44x_pos++;
	if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
		kvmppc_tlb_44x_pos = 0;
	stlbe = &vcpu->arch.shadow_tlb[victim];

	/* Get reference to new page. */
	down_read(&current->mm->mmap_sem);
	new_page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_44x_shadow_release(vcpu, victim);

	vcpu->arch.shadow_pages[victim] = new_page;

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

	stlbe->tid = !(asid & 0xff);

	/* Force TS=1 for all guest mappings. */
	/* For now we hardcode 4KB mappings, but it will be important to
	 * use host large pages in the future. */
	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
	               | PPC44x_TLB_4K;
	/* word1 holds the real page number; bits above 32 of the 36-bit
	 * physical address go in the 4-bit ERPN field. */
	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                            vcpu->arch.msr & MSR_PR);
	kvmppc_tlbe_set_modified(vcpu, victim);

	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
		    stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
		    handler);
}
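
/* Invalidate any shadow TLB entries covering the guest mapping
 * [eaddr, eend] with the given address space ID, releasing their host
 * pages. Called when the guest overwrites or invalidates one of its own
 * TLB entries. */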
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                           gva_t eend, u32 asid)
{
	unsigned int pid = !(asid & 0xff);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;
		if (eend < get_tlb_eaddr(stlbe))
			continue;
		if (eaddr > get_tlb_end(stlbe))
			continue;
		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_44x_shadow_release(vcpu, i);
		stlbe->word0 = 0;	/* Clear PPC44x_TLB_VALID. */
		kvmppc_tlbe_set_modified(vcpu, i);
		KVMTRACE_5D(STLB_INVAL, vcpu, i,
			    stlbe->tid, stlbe->word0, stlbe->word1,
			    stlbe->word2, handler);
	}
}

/* Invalidate all mappings on the privilege switch after PID has been changed.
 * The guest always runs with PID=1, so we must clear the entire TLB when
 * switching address spaces. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	int i;

	if (vcpu->arch.swap_pid) {
		/* XXX Replace loop with fancy data structures. */
		for (i = 0; i <= tlb_44x_hwater; i++) {
			struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];

			/* Future optimization: clear only userspace
			 * mappings. */
			kvmppc_44x_shadow_release(vcpu, i);
			stlbe->word0 = 0;	/* Clear PPC44x_TLB_VALID. */
			kvmppc_tlbe_set_modified(vcpu, i);
			KVMTRACE_5D(STLB_INVAL, vcpu, i,
				    stlbe->tid, stlbe->word0, stlbe->word1,
				    stlbe->word2, handler);
		}
		vcpu->arch.swap_pid = 0;
	}

	/* Guest supervisor mode runs under shadow PID 1, guest user mode
	 * under shadow PID 0. */
	vcpu->arch.shadow_pid = !usermode;
}