direct-io.hg

view xen/arch/powerpc/powerpc64/domain.c @ 11490:b82a8107cae6

[POWERPC][XEN] Adjust DEC correctly

If DEC would have expired, we now restore DEC to 0 so it will fire.
The domain can no longer tell, by reading DEC, how late the decrementer
was, but I don't think anyone tracks that.

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Fri Sep 01 12:12:43 2006 -0400 (2006-09-01)
parents 050de6b53961
children 464acece0dad
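
For context, the fix described above amounts to clamping the restored DEC at
zero: on resume the elapsed timebase ticks are subtracted from the saved DEC,
and if the decrementer would already have expired while the vcpu was
descheduled, 0 is loaded so it fires immediately. A minimal sketch of that
adjustment follows; it mirrors the logic in load_sprs() below, and the helper
name adjusted_dec is hypothetical, not part of the file:

static inline unsigned long adjusted_dec(unsigned long saved_dec,
                                         unsigned long elapsed_tb_ticks)
{
    /* Decrementer would already have expired while descheduled:
     * load 0 so it fires as soon as the vcpu runs again. */
    if (elapsed_tb_ticks > saved_dec)
        return 0;

    /* Otherwise keep counting down from the remaining ticks. */
    return saved_dec - elapsed_tb_ticks;
}
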
line source
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <asm/current.h>

void save_sprs(struct vcpu *v)
{
    v->arch.timebase = mftb();

    v->arch.sprg[0] = mfsprg0();
    v->arch.sprg[1] = mfsprg1();
    v->arch.sprg[2] = mfsprg2();
    v->arch.sprg[3] = mfsprg3();

    v->arch.dar = mfdar();
    v->arch.dsisr = mfdsisr();

    save_cpu_sprs(v);
}

void load_sprs(struct vcpu *v)
{
    ulong timebase_delta;

    mtsprg0(v->arch.sprg[0]);
    mtsprg1(v->arch.sprg[1]);
    mtsprg2(v->arch.sprg[2]);
    mtsprg3(v->arch.sprg[3]);
    mtdar(v->arch.dar);
    mtdsisr(v->arch.dsisr);

    load_cpu_sprs(v);

    /* Adjust the DEC value to account for cycles spent while this OS was
     * not running; if DEC would already have expired, restore 0 so the
     * decrementer fires immediately. */
    timebase_delta = mftb() - v->arch.timebase;
    if (timebase_delta > v->arch.dec)
        v->arch.dec = 0;
    else
        v->arch.dec -= timebase_delta;
}

/* XXX evaluate all isyncs in segment code */

static void flush_slb(struct vcpu *v)
{
    struct slb_entry *slb0 = &v->arch.slb_entries[0];

    slbia();

    /* we manually have to invalidate SLB[0] since slbia doesn't. */
    /* XXX name magic constants! */
    if (slb0->slb_esid & (1 << (63 - 36))) {
        ulong rb;
        ulong class;

        class = (slb0->slb_vsid >> (63 - 56)) & 1ULL;
        rb = slb0->slb_esid & (~0ULL << (63 - 35));
        rb |= class << (63 - 36);

        slbie(rb);
    }
}

void save_segments(struct vcpu *v)
{
    struct slb_entry *slb_entry = v->arch.slb_entries;
    int i;

    /* save all extra SLBs */
    for (i = 0; i < NUM_SLB_ENTRIES; i++) {
        ulong vsid;
        ulong esid;

        __asm__ __volatile__(
                "slbmfev %0,%2\n"
                "slbmfee %1,%2\n"
                : "=&r" (vsid), "=&r" (esid)
                : "r" (i)
                : "memory");

        /* FIXME: should we bother to save invalid entries? */
        slb_entry[i].slb_vsid = vsid;
        slb_entry[i].slb_esid = esid;
#ifdef SLB_DEBUG
        if (vsid != 0) {
            printf("%s: DOM[0x%x]: S%02d: 0x%016lx 0x%016lx\n",
                   __func__, v->domain->domain_id, i, vsid, esid);
        }
#endif
    }

    flush_slb(v);
}

void load_segments(struct vcpu *v)
{
    struct slb_entry *slb_entry = v->arch.slb_entries;
    int i;

    /* restore all extra SLBs */
    for (i = 0; i < NUM_SLB_ENTRIES; i++) {
        ulong vsid = slb_entry[i].slb_vsid;
        ulong esid = slb_entry[i].slb_esid;

        /* FIXME: should we bother to restore invalid entries? */
        /* stuff in the index here */
        esid |= i & ((0x1UL << (63 - 52 + 1)) - 1);

        __asm__ __volatile__(
                "isync\n"
                "slbmte %0,%1\n"
                "isync\n"
                :
                : "r" (vsid), "r" (esid)
                : "memory");

#ifdef SLB_DEBUG
        if (vsid != 0) {
            printf("%s: DOM[0x%x]: R%02d: 0x%016lx 0x%016lx\n",
                   __func__, v->domain->domain_id, i, vsid, esid);
        }
#endif
    }
}