/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 */

#undef DEBUG
#undef DEBUG_FAIL

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <xen/init.h>
#include <public/xen.h>
#include <asm/current.h>
#include <asm/papr.h>
#include <asm/hcalls.h>
#ifdef USE_PTE_INSERT
static inline void pte_insert(union pte volatile *pte,
                              ulong vsid, ulong rpn, ulong lrpn)
{
    /*
     * It's required that external locking be done to provide
     * exclusion between the choices of insertion points.  Any valid
     * choice of pte requires that the pte be invalid upon entry to
     * this function.
     */
    ASSERT( (pte->bits.v == 0) );

    /* Set shadow word. */
    (void)lrpn;

    /* Set the second word first so the valid bit is the last thing set. */
    pte->words.rpn = rpn;

    /* Guarantee the second word is visible before the valid bit. */
    __asm__ __volatile__("eieio" : : : "memory");

    /* Now set the first word, including the valid bit. */
    pte->words.vsid = vsid;
    /* Architecturally this instruction will cause a heavier operation
     * if this one is not supported.  Note: on some machines, like Cell,
     * this could be a nop. */
    __asm__ __volatile__("ptesync" : : : "memory");
}
#endif
static void pte_tlbie(union pte volatile *pte, ulong ptex)
{
    ulong va;
    ulong vsid;
    ulong group;
    ulong pi;
    ulong pi_high;
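
    /*
     * Reconstruct the virtual address that tlbie needs from what the
     * PTE itself provides: the AVPN holds the high VA bits, and the
     * PTEG index encodes (VSID xor page-index) from the hash function,
     * so XORing the group index with the VSID recovers the low bits of
     * the page index.  Secondary-hash entries (bits.h set) were placed
     * with the complemented group index, so complement it again here.
     */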
    vsid = pte->bits.avpn >> 5;
    group = ptex >> 3;
    if (pte->bits.h) {
        group = ~group;
    }
    pi = (vsid ^ group) & 0x7ff;
    pi_high = (pte->bits.avpn & 0x1f) << 11;
    pi |= pi_high;
    va = (pi << 12) | (vsid << 28);
    va &= ~(0xffffULL << 48);

#ifndef FLUSH_THE_WHOLE_THING
    if (pte->bits.l) {
        va |= (pte->bits.rpn & 1);
        asm volatile("ptesync; tlbie %0,1" : : "r"(va) : "memory");
    } else {
        asm volatile("ptesync; tlbie %0,0" : : "r"(va) : "memory");
    }
    asm volatile("eieio; tlbsync; ptesync" : : : "memory");
#else
    {
        unsigned i;
        ulong rb;
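
        /*
         * Brute-force fallback: iterate over all 256 values of the
         * page-index bits that select a TLB congruence class, flushing
         * both the 4K (L=0) and large-page (L=1) entries for each.
         */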
        for (i = 0; i < 256; i++) {
            rb = i;
            rb <<= 12;
            /* rb is read by tlbie, so it must be an input operand */
            asm volatile("ptesync; tlbie %0,0; eieio; tlbsync; ptesync; isync"
                         : : "r" (rb) : "memory");
            asm volatile("ptesync; tlbie %0,1; eieio; tlbsync; ptesync; isync"
                         : : "r" (rb) : "memory");
        }
    }
#endif
}
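
/*
 * H_ENTER: install a PTE supplied by the guest (gpr6/gpr7) at the
 * PTE/PTEG index in gpr5, translating the guest's logical RPN to a
 * machine RPN and checking WIMG bits for I/O pages.  The chosen slot
 * index is returned in gpr4.
 */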
static void h_enter(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];

    union pte pte;
    union pte volatile *ppte;
    struct domain_htab *htab;
    int lp_bits = 0;
    int pgshift = PAGE_SHIFT;
    ulong idx;
    int limit = 0; /* how many PTEs to examine in the PTEG */
    ulong lpn;
    ulong rpn;
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    int mtype;

    htab = &d->arch.htab;
    if (ptex >= (1UL << htab->log_num_ptes)) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }

    /* use local HPTE to avoid manual shifting & masking */
    pte.words.vsid = regs->gprs[6];
    pte.words.rpn = regs->gprs[7];

    if ( pte.bits.l ) { /* large page? */
        /* figure out the page size for the selected large page */
        ulong lp_rpn = pte.bits.rpn;
        uint lp_size = 0;
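
        /*
         * The large-page size is encoded as a run of low-order 1 bits
         * in the RPN field: count them to index large_page_order[],
         * and remember them in lp_bits so they can be restored after
         * the RPN has been translated.
         */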
        while ( lp_rpn & 0x1 ) {
            lp_rpn >>= 1;
            lp_bits = ((lp_bits << 1) | 0x1);
            lp_size++;
        }

        if ( lp_size >= d->arch.large_page_sizes ) {
            printk("%s: attempt to use unsupported lp_size %d\n",
                   __func__, lp_size);
            regs->gprs[3] = H_Parameter;
            return;
        }

        /* get correct pgshift value */
        pgshift = d->arch.large_page_order[lp_size] + PAGE_SHIFT;
    }

    /* Get the correct logical RPN in terms of 4K pages; we need to mask
     * off the lp bits and unused arpn bits if this is a large page. */
    lpn = ~0ULL << (pgshift - PAGE_SHIFT);
    lpn = pte.bits.rpn & lpn;

    rpn = pfn2mfn(d, lpn, &mtype);
    if (rpn == INVALID_MFN) {
        regs->gprs[3] = H_Parameter;
        return;
    }
    if (mtype == PFN_TYPE_IO) {
        /* only a privileged domain can access outside IO space */
        if ( !test_bit(_DOMF_privileged, &d->domain_flags) ) {
            regs->gprs[3] = H_Privilege;
            printk("%s: unprivileged access to logical page: 0x%lx\n",
                   __func__, lpn);
            return;
        }

        if ( !((pte.bits.w == 0)
               && (pte.bits.i == 1)
               && (pte.bits.g == 1)) ) {
#ifdef DEBUG_FAIL
            printk("%s: expecting an IO WIMG "
                   "w=%d i=%d m=%d g=%d, word 0x%lx\n", __func__,
                   pte.bits.w, pte.bits.i, pte.bits.m, pte.bits.g,
                   pte.words.rpn);
#endif
            regs->gprs[3] = H_Parameter;
            return;
        }
    }
    /* fixup the RPN field of our local PTE copy */
    pte.bits.rpn = rpn | lp_bits;

    /* clear reserved bits in high word */
    pte.bits.lock = 0x0;
    pte.bits.res = 0x0;

    /* clear reserved bits in low word */
    pte.bits.pp0 = 0x0;
    pte.bits.ts = 0x0;
    pte.bits.res2 = 0x0;
    if ( !(flags & H_EXACT) ) {
        /* PTEG (not specific PTE); clear 3 lowest bits */
        ptex &= ~0x7UL;
        limit = 7;
    }

    /* data manipulations should be done prior to the pte insertion */
    if ( flags & H_ZERO_PAGE ) {
        memset((void *)(rpn << PAGE_SHIFT), 0, 1UL << pgshift);
    }

    if ( flags & H_ICACHE_INVALIDATE ) {
        ulong k;
        ulong addr = rpn << PAGE_SHIFT;

        for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
            dcbst(addr + k);
            sync();
            icbi(addr + k);
            sync();
            isync();
        }
    }

    if ( flags & H_ICACHE_SYNCHRONIZE ) {
        ulong k;
        ulong addr = rpn << PAGE_SHIFT;
        for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
            icbi(addr + k);
            sync();
            isync();
        }
    }

    for (idx = ptex; idx <= ptex + limit; idx++) {
        ppte = &htab->map[idx];

        if ( ppte->bits.v == 0 && ppte->bits.lock == 0) {
            /* got it */
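
            /*
             * Write the low (RPN) word first; eieio orders it ahead of
             * the high word, whose valid bit makes the entry live; the
             * trailing ptesync makes the update visible to hardware
             * page-table walks before we return.
             */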
            asm volatile(
                "std %1, 8(%0); eieio; std %2, 0(%0); ptesync"
                :
                : "b" (ppte), "r" (pte.words.rpn), "r" (pte.words.vsid)
                : "memory");

            regs->gprs[3] = H_Success;
            regs->gprs[4] = idx;

            return;
        }
    }

#ifdef DEBUG
    /* If the PTEG is full then no additional values are returned. */
    printk("%s: PTEG FULL\n", __func__);
#endif

    regs->gprs[3] = H_PTEG_Full;
}
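
/*
 * H_PROTECT: update the protection (PP) and no-execute (N) bits of the
 * PTE at index gpr5, optionally checking its AVPN against gpr6 first.
 * The entry is invalidated and flushed before being rewritten.
 */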
static void h_protect(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    ulong avpn = regs->gprs[6];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *ppte;
    union pte lpte;

#ifdef DEBUG
    printk("%s: flags: 0x%lx ptex: 0x%lx avpn: 0x%lx\n", __func__,
           flags, ptex, avpn);
#endif
    if ( ptex >= (1UL << htab->log_num_ptes) ) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    ppte = &htab->map[ptex];

    lpte.words.vsid = ppte->words.vsid;
    lpte.words.rpn = ppte->words.rpn;

    /* the AVPN param occupies the bit-space of the word */
    if ( (flags & H_AVPN) && lpte.bits.avpn != avpn >> 7 ) {
#ifdef DEBUG_FAIL
        printk("%s: %p: AVPN check failed: 0x%lx, 0x%lx\n", __func__,
               ppte, lpte.words.vsid, lpte.words.rpn);
#endif
        regs->gprs[3] = H_Not_Found;
        return;
    }

    if (lpte.bits.v == 0) {
        /* PAPR does not specify what to do here; this is because we
         * invalidate entries where PAPR says to zero the whole high
         * dword, so the AVPN check should catch this first. */
#ifdef DEBUG_FAIL
        printk("%s: pte invalid\n", __func__);
#endif
        regs->gprs[3] = H_Not_Found;
        return;
    }
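
    /*
     * Invalidate the entry and flush the stale translation from the
     * TLB before rewriting it, so that no translation carrying the old
     * protection bits can survive the update.
     */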
    lpte.bits.v = 0;

    /* ppte->words.vsid = lpte.words.vsid; */
    asm volatile(
        "eieio; std %1, 0(%0); ptesync"
        :
        : "b" (ppte), "r" (0)
        : "memory");

    pte_tlbie(&lpte, ptex);

    /* We never touch pp0, and the PP bits in flags are in the right
     * order. */
    lpte.bits.pp1 = flags & (H_PP1 | H_PP2);
    lpte.bits.n = (flags & H_N) ? 1 : 0;

    lpte.bits.v = 1;
    lpte.bits.r = 0;

    asm volatile(
        "std %1, 8(%0); eieio; std %2, 0(%0); ptesync"
        :
        : "b" (ppte), "r" (lpte.words.rpn), "r" (lpte.words.vsid)
        : "memory");

    regs->gprs[3] = H_Success;
}
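
/*
 * H_CLEAR_REF: clear the Reference (R) bit of the PTE at index gpr5
 * and return the old low word in gpr4.
 */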
static void h_clear_ref(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *pte;
    union pte lpte;

#ifdef DEBUG
    printk("%s: flags: 0x%lx ptex: 0x%lx\n", __func__,
           flags, ptex);
#endif

    if (flags != 0) {
        printk("WARNING: %s: "
               "flags are undefined and should be 0: 0x%lx\n",
               __func__, flags);
    }

    if (ptex >= (1UL << htab->log_num_ptes)) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    pte = &htab->map[ptex];
    /* pte_tlbie() derives the VA from the high (VSID) word, so read
     * both words into the local copy, not just the RPN word. */
    lpte.words.vsid = pte->words.vsid;
    lpte.words.rpn = pte->words.rpn;

    regs->gprs[4] = lpte.words.rpn;

    if (lpte.bits.r != 0) {
        lpte.bits.r = 0;

        asm volatile("std %1, 8(%0); eieio; ptesync"
                     :
                     : "b" (pte), "r" (lpte.words.rpn) : "memory");

        pte_tlbie(&lpte, ptex);
    }
    regs->gprs[3] = H_Success;
}
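
/*
 * H_CLEAR_MOD: clear the Change (C) bit of the PTE at index gpr5 and
 * return the old low word in gpr4.  Clearing C requires invalidating
 * and rewriting the entry.
 */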
static void h_clear_mod(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *pte;
    union pte lpte;

#ifdef DEBUG
    printk("%s: flags: 0x%lx ptex: 0x%lx\n", __func__,
           flags, ptex);
#endif
    if (flags != 0) {
        printk("WARNING: %s: "
               "flags are undefined and should be 0: 0x%lx\n",
               __func__, flags);
    }

    if (ptex >= (1UL << htab->log_num_ptes)) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    pte = &htab->map[ptex];
    lpte.words.vsid = pte->words.vsid;
    lpte.words.rpn = pte->words.rpn;

    regs->gprs[3] = H_Success;
    regs->gprs[4] = lpte.words.rpn;

    if (lpte.bits.c != 0) {
        /* invalidate, flush, then rewrite with C cleared */
        asm volatile(
            "eieio; std %1, 0(%0); ptesync"
            :
            : "b" (pte), "r" (0)
            : "memory");

        pte_tlbie(&lpte, ptex);

        lpte.bits.c = 0;
        asm volatile(
            "std %1, 8(%0); eieio; std %2, 0(%0); ptesync"
            :
            : "b" (pte), "r" (lpte.words.rpn), "r" (lpte.words.vsid)
            : "memory");
    }
}
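
/*
 * H_REMOVE: invalidate and flush the PTE at index gpr5, optionally
 * checking its AVPN (H_AVPN) or an AND-mask of the high word
 * (H_ANDCOND) first.  The old PTE is returned in gpr4/gpr5.
 */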
static void h_remove(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    ulong avpn = regs->gprs[6];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *pte;
    union pte lpte;

#ifdef DEBUG
    printk("%s: flags: 0x%lx ptex: 0x%lx avpn: 0x%lx\n", __func__,
           flags, ptex, avpn);
#endif
    if ( ptex >= (1UL << htab->log_num_ptes) ) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    pte = &htab->map[ptex];
    lpte.words.vsid = pte->words.vsid;
    lpte.words.rpn = pte->words.rpn;

    if ((flags & H_AVPN) && lpte.bits.avpn != (avpn >> 7)) {
#ifdef DEBUG_FAIL
        printk("%s: avpn does not match\n", __func__);
#endif
        regs->gprs[3] = H_Not_Found;
        return;
    }

    if ((flags & H_ANDCOND) && ((avpn & pte->words.vsid) != 0)) {
#ifdef DEBUG_FAIL
        printk("%s: andcond does not match\n", __func__);
#endif
        regs->gprs[3] = H_Not_Found;
        return;
    }

    regs->gprs[3] = H_Success;
    /* return old PTE in regs 4 and 5 */
    regs->gprs[4] = lpte.words.vsid;
    regs->gprs[5] = lpte.words.rpn;

    /* XXX - I'm very skeptical of doing ANYTHING if not bits.v */
    /* XXX - I think the spec should be questioned in this case (MFM) */
    if (pte->bits.v == 0) {
        printk("%s: removing invalid entry\n", __func__);
    }
    asm volatile("eieio; std %1, 0(%0); ptesync"
                 :
                 : "b" (pte), "r" (0)
                 : "memory");

    pte_tlbie(&lpte, ptex);
}
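
/*
 * H_READ: return the PTE at index gpr5 in gpr4/gpr5; with H_READ_4 the
 * index is aligned down and the whole group of four PTEs is returned
 * in gpr4 through gpr11.
 */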
static void h_read(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *pte;

    if (flags & H_READ_4)
        ptex &= ~0x3UL;

    if (ptex >= (1UL << htab->log_num_ptes)) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    pte = &htab->map[ptex];
    regs->gprs[4] = pte[0].words.vsid;
    regs->gprs[5] = pte[0].words.rpn;

    if (flags & H_READ_4) {
        /* dump another 3 PTEs */
        regs->gprs[6] = pte[1].words.vsid;
        regs->gprs[7] = pte[1].words.rpn;
        regs->gprs[8] = pte[2].words.vsid;
        regs->gprs[9] = pte[2].words.rpn;
        regs->gprs[10] = pte[3].words.vsid;
        regs->gprs[11] = pte[3].words.rpn;
    }

    regs->gprs[3] = H_Success;
}
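
/* Register the hash-table hcall handlers with the PAPR dispatch table. */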
__init_papr_hcall(H_ENTER, h_enter);
__init_papr_hcall(H_READ, h_read);
__init_papr_hcall(H_REMOVE, h_remove);
__init_papr_hcall(H_CLEAR_MOD, h_clear_mod);
__init_papr_hcall(H_CLEAR_REF, h_clear_ref);
__init_papr_hcall(H_PROTECT, h_protect);