direct-io.hg: view xen/arch/powerpc/powerpc64/ppc970.c @ 11498:464acece0dad

[POWERPC][XEN] Clear SLB entries on boot and other cleanups

This patch clears any SLB entries that might have been left behind by
firmware, and also cleans up the save and restore of the segments.

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Thu Sep 07 02:21:17 2006 -0400 (2006-09-07)
parents 22e01a4864b0
children 2ebf55e419c9
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 *          Amos Waterland <apw@us.ibm.com>
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <asm/time.h>
#include <asm/current.h>
#include <asm/powerpc64/procarea.h>
#include <asm/powerpc64/processor.h>
#include <asm/powerpc64/ppc970-hid.h>

#undef DEBUG
#undef SERIALIZE

struct rma_settings {
    int order;
    int rmlr_0;
    int rmlr_1_2;
};

static struct rma_settings rma_orders[] = {
    { .order = 26, .rmlr_0 = 0, .rmlr_1_2 = 3, }, /* 64 MB */
    { .order = 27, .rmlr_0 = 1, .rmlr_1_2 = 3, }, /* 128 MB */
    { .order = 28, .rmlr_0 = 1, .rmlr_1_2 = 0, }, /* 256 MB */
    { .order = 30, .rmlr_0 = 0, .rmlr_1_2 = 2, }, /* 1 GB */
    { .order = 34, .rmlr_0 = 0, .rmlr_1_2 = 1, }, /* 16 GB */
    { .order = 38, .rmlr_0 = 0, .rmlr_1_2 = 0, }, /* 256 GB */
};
static uint log_large_page_sizes[] = {
    4 + 20, /* 1 << (4 + 20) == 16 MB */
};
static struct rma_settings *cpu_find_rma(unsigned int order)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(rma_orders); i++) {
        if (rma_orders[i].order == order)
            return &rma_orders[i];
    }
    return NULL;
}

unsigned int cpu_default_rma_order_pages(void)
{
    return rma_orders[0].order - PAGE_SHIFT;
}

int cpu_rma_valid(unsigned int log)
{
    return cpu_find_rma(log) != NULL;
}
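
[Editor's note] The helpers above mix two units: rma_orders[] stores log2 sizes in bytes, while callers (see cpu_init_vcpu() below) track the RMA order in pages, which is why PAGE_SHIFT is subtracted and added back. A minimal standalone sketch of that conversion, assuming the 4 KiB PAGE_SHIFT this port uses; it compiles outside the Xen tree:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12  /* 4 KiB pages, assumed to match this port */

/* Log2 byte sizes mirrored from rma_orders[] above. */
static const unsigned int rma_log2_bytes[] = { 26, 27, 28, 30, 34, 38 };

/* Standalone analogue of cpu_rma_valid(): takes log2 of the size in bytes. */
static int rma_valid(unsigned int log)
{
    size_t i;
    for (i = 0; i < sizeof(rma_log2_bytes) / sizeof(rma_log2_bytes[0]); i++)
        if (rma_log2_bytes[i] == log)
            return 1;
    return 0;
}

int main(void)
{
    /* Callers track the RMA order in log2 *pages*, so PAGE_SHIFT must be
     * added back before the lookup, exactly as cpu_init_vcpu() does. */
    unsigned int order_pages = 26 - PAGE_SHIFT;  /* the 64 MB default */
    printf("64 MB  (order %u pages): %svalid\n", order_pages,
           rma_valid(order_pages + PAGE_SHIFT) ? "" : "in");
    printf("512 MB (order %u pages): %svalid\n", 29u - PAGE_SHIFT,
           rma_valid(29) ? "" : "in");  /* no 2^29 entry in the table */
    return 0;
}
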
unsigned int cpu_large_page_orders(uint *sizes, uint max)
{
    uint i = 0;

    while (i < max && i < ARRAY_SIZE(log_large_page_sizes)) {
        sizes[i] = log_large_page_sizes[i] - PAGE_SHIFT;
        ++i;
    }

    return i;
}

unsigned int cpu_extent_order(void)
{
    return log_large_page_sizes[0] - PAGE_SHIFT;
}
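
[Editor's note] The only large page size advertised is 2^(4+20) = 16 MB; with 4 KiB base pages the order handed back to callers is 24 - 12 = 12, i.e. one large page covers 2^12 = 4096 base pages. A quick standalone check of that arithmetic, again assuming PAGE_SHIFT of 12:

#include <stdio.h>

#define PAGE_SHIFT 12  /* 4 KiB base pages, as assumed above */

int main(void)
{
    unsigned int log_bytes = 4 + 20;              /* 16 MB large page */
    unsigned int order = log_bytes - PAGE_SHIFT;  /* what the helpers return */
    printf("large page: %u MB, order %u (%lu base pages)\n",
           1u << (log_bytes - 20), order, 1UL << order);
    return 0;
}
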
/* This is more a platform thing than a CPU thing, but we only have
 * one platform now */
int cpu_io_mfn(ulong mfn)
{
    /* totally cheating */
    if (mfn >= (2UL << (30 - PAGE_SHIFT)) &&  /* 2GiB */
        mfn < (4UL << (30 - PAGE_SHIFT)))     /* 4GiB */
        return 1;

    return 0;
}
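
[Editor's note] With 4 KiB pages this test marks MFNs 0x80000 through 0xfffff as I/O, i.e. the physical range [2 GiB, 4 GiB). A standalone sketch of the boundary arithmetic, with PAGE_SHIFT of 12 assumed as before:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Standalone copy of cpu_io_mfn()'s range check. */
static int io_mfn(unsigned long mfn)
{
    return mfn >= (2UL << (30 - PAGE_SHIFT)) &&  /* 2 GiB */
           mfn <  (4UL << (30 - PAGE_SHIFT));    /* 4 GiB */
}

int main(void)
{
    printf("first I/O mfn: 0x%lx -> %d\n", 2UL << 18, io_mfn(2UL << 18));
    printf("just below:    0x%lx -> %d\n", (2UL << 18) - 1,
           io_mfn((2UL << 18) - 1));
    printf("last I/O mfn:  0x%lx -> %d\n", (4UL << 18) - 1,
           io_mfn((4UL << 18) - 1));
    return 0;
}
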
static u64 cpu0_hids[6];
static u64 cpu0_hior;

void cpu_initialize(int cpuid)
{
    ulong r1, r2;
    union hid0 hid0;
    union hid1 hid1;
    union hid4 hid4;
    union hid5 hid5;

    __asm__ __volatile__ ("mr %0, 1" : "=r" (r1));
    __asm__ __volatile__ ("mr %0, 2" : "=r" (r2));
    if (cpuid == 0) {
        /* We can assume that these are sane to start with. We
         * _do_ store the results in case we want to mess with them
         * on a per-cpu basis later. */
        cpu0_hids[0] = mfhid0();
        cpu0_hids[1] = mfhid1();
        cpu0_hids[4] = mfhid4();
        cpu0_hids[5] = mfhid5();
        cpu0_hior = 0;
    }
    hid0.word = cpu0_hids[0];
    hid1.word = cpu0_hids[1];
    hid4.word = cpu0_hids[4];
    hid5.word = cpu0_hids[5];

    /* This is SMP safe because the compiler must use r13 for it. */
    parea = global_cpu_table[cpuid];
    ASSERT(parea != NULL);

    mthsprg0((ulong)parea); /* now ready for exceptions */

    /* Set decrementers for 1 second to keep them out of the way during
     * initialization. */
    /* XXX make tickless */
    mtdec(timebase_freq);
    mthdec(timebase_freq);
    hid0.bits.nap = 1;      /* NAP */
    hid0.bits.dpm = 1;      /* Dynamic Power Management */
    hid0.bits.nhr = 0;      /* ! Not Hard Reset */
    hid0.bits.hdice_en = 1; /* enable HDEC */
    hid0.bits.en_therm = 0; /* ! Enable ext thermal ints */
    /* only a debug build of Xen should do this */
    hid0.bits.en_attn = 1;  /* Enable attn instruction */

#ifdef SERIALIZE
    hid0.bits.one_ppc = 1;
    hid0.bits.isync_sc = 1;
    hid0.bits.inorder = 1;
    /* may not want these */
    hid0.bits.do_single = 1;
    hid0.bits.ser_gp = 1;
#endif
    printk("CPU #%d: Hello World! SP = %lx TOC = %lx HID0 = %lx\n",
           smp_processor_id(), r1, r2, hid0.word);
    mthid0(hid0.word);

    hid1.bits.bht_pm = 7;     /* branch history table prediction mode */
    hid1.bits.en_ls = 1;      /* enable link stack */

    hid1.bits.en_cc = 1;      /* enable count cache */
    hid1.bits.en_ic = 1;      /* enable inst cache */

    hid1.bits.pf_mode = 2;    /* prefetch mode */

    hid1.bits.en_if_cach = 1; /* i-fetch cacheability control */
    hid1.bits.en_ic_rec = 1;  /* i-cache parity error recovery */
    hid1.bits.en_id_rec = 1;  /* i-dir parity error recovery */
    hid1.bits.en_er_rec = 1;  /* i-ERAT parity error recovery */

    hid1.bits.en_sp_itw = 1;  /* enable speculative tablewalks */
    mthid1(hid1.word);

    /* no changes to hid4 but we want to make sure that secondaries
     * are sane */
    hid4.bits.lg_pg_dis = 0;  /* make sure we enable large pages */
    mthid4(hid4.word);

    hid5.bits.DCBZ_size = 0;  /* make dcbz size 32 bytes */
    hid5.bits.DCBZ32_ill = 0; /* do not make 32-byte dcbz illegal */
    mthid5(hid5.word);
#ifdef DEBUG
    printk("hid0 0x%016lx\n"
           "hid1 0x%016lx\n"
           "hid4 0x%016lx\n"
           "hid5 0x%016lx\n",
           mfhid0(), mfhid1(), mfhid4(), mfhid5());
#endif

    mthior(cpu0_hior);

#ifdef DEBUG
    dump_segments(1);
#endif
    flush_segments();
}
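
[Editor's note] The union hidN types come from asm/powerpc64/ppc970-hid.h, which is not shown here. The pattern is a 64-bit register image overlaid with named bitfields, so the code can read a HID SPR, flip individual bits, and write the whole word back. A hypothetical, cut-down illustration of that layout; the field names and positions below are placeholders, not the real ppc970-hid.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical miniature of the ppc970-hid.h pattern: one 64-bit SPR
 * image accessible either as a whole word or as individual bits. */
union hid_demo {
    uint64_t word;
    struct {
        uint64_t _reserved : 62;
        uint64_t nap       : 1;  /* placeholder position */
        uint64_t dpm       : 1;  /* placeholder position */
    } bits;
};

int main(void)
{
    union hid_demo hid = { .word = 0 };  /* would be mfhid0() in Xen */
    hid.bits.nap = 1;
    hid.bits.dpm = 1;
    printf("word to write back: 0x%016llx\n",
           (unsigned long long)hid.word);  /* would be mthid0(hid.word) */
    return 0;
}
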
void cpu_init_vcpu(struct vcpu *v)
{
    struct domain *d = v->domain;
    union hid4 hid4;
    struct rma_settings *rma_settings;

    hid4.word = mfhid4();

    hid4.bits.lpes_0 = 0; /* external exceptions set MSR_HV=1 */
    hid4.bits.lpes_1 = 1; /* RMA applies */

    hid4.bits.rmor_0_15 = page_to_maddr(d->arch.rma_page) >> 26;

    hid4.bits.lpid_0_1 = d->domain_id & 3;
    hid4.bits.lpid_2_5 = (d->domain_id >> 2) & 0xf;

    rma_settings = cpu_find_rma(d->arch.rma_order + PAGE_SHIFT);
    ASSERT(rma_settings != NULL);
    hid4.bits.rmlr_0 = rma_settings->rmlr_0;
    hid4.bits.rmlr_1_2 = rma_settings->rmlr_1_2;

    v->arch.cpu.hid4.word = hid4.word;
}
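
[Editor's note] The field packing above is easy to check by hand: a 6-bit LPID is split two/four across lpid_0_1 and lpid_2_5 (note that the split here puts domain_id's low two bits in lpid_0_1), and RMOR holds the real-mode base address in 64 MB (2^26) units. A standalone sketch with a hypothetical domain ID and RMA placement:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical domain: ID 11, RMA placed at 128 MB physical. */
    uint16_t domain_id = 11;                       /* 0b1011 */
    uint64_t rma_maddr = 128UL << 20;

    unsigned int lpid_0_1 = domain_id & 3;          /* low 2 bits  -> 3 */
    unsigned int lpid_2_5 = (domain_id >> 2) & 0xf; /* next 4 bits -> 2 */
    uint64_t rmor_0_15 = rma_maddr >> 26;           /* 64 MB units -> 2 */

    printf("lpid_0_1=%u lpid_2_5=%u rmor_0_15=%llu\n",
           lpid_0_1, lpid_2_5, (unsigned long long)rmor_0_15);
    return 0;
}
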
void save_cpu_sprs(struct vcpu *v)
{
    /* HID4 is initialized with a per-domain value at domain creation time,
     * and does not change after that. */
}

void load_cpu_sprs(struct vcpu *v)
{
    mthid4(v->arch.cpu.hid4.word);
}
int cpu_machinecheck(struct cpu_user_regs *regs)
{
    int recover = 0;
    u32 dsisr = mfdsisr();

    if (regs->msr & MCK_SRR1_RI)
        recover = 1;

    printk("MACHINE CHECK: %s Recoverable\n", recover ? "IS" : "NOT");
    printk("SRR1: 0x%016lx\n", regs->msr);
    if (regs->msr & MCK_SRR1_INSN_FETCH_UNIT)
        printk("42: Exception caused by Instruction Fetch Unit (IFU) "
               "detection of a hardware uncorrectable error (UE).\n");

    if (regs->msr & MCK_SRR1_LOAD_STORE)
        printk("43: Exception caused by load/store detection of error "
               "(see DSISR)\n");

    switch (regs->msr & MCK_SRR1_CAUSE_MASK) {
    case MCK_SRR1_CAUSE_SLB_PAR:
        printk("0b01: Exception caused by an SLB parity error detected "
               "while translating an instruction fetch address.\n");
        break;
    case MCK_SRR1_CAUSE_TLB_PAR:
        printk("0b10: Exception caused by a TLB parity error detected "
               "while translating an instruction fetch address.\n");
        break;
    case MCK_SRR1_CAUSE_UE:
        printk("0b11: Exception caused by a hardware uncorrectable "
               "error (UE) detected while doing a reload of an "
               "instruction-fetch TLB tablewalk.\n");
        break;
    default:
        break;
    }
    printk("\nDSISR: 0x%08x\n", dsisr);
    if (dsisr & MCK_DSISR_UE)
        printk("16: Exception caused by a UE deferred error "
               "(DAR is undefined).\n");

    if (dsisr & MCK_DSISR_UE_TABLE_WALK)
        printk("17: Exception caused by a UE deferred error "
               "during a tablewalk (D-side).\n");

    if (dsisr & MCK_DSISR_L1_DCACHE_PAR)
        printk("18: Exception was caused by a software recoverable "
               "parity error in the L1 D-cache.\n");

    if (dsisr & MCK_DSISR_L1_DCACHE_TAG_PAR)
        printk("19: Exception was caused by a software recoverable "
               "parity error in the L1 D-cache tag.\n");

    if (dsisr & MCK_DSISR_D_ERAT_PAR)
        printk("20: Exception was caused by a software recoverable parity "
               "error in the D-ERAT.\n");

    if (dsisr & MCK_DSISR_TLB_PAR)
        printk("21: Exception was caused by a software recoverable parity "
               "error in the TLB.\n");

    if (dsisr & MCK_DSISR_SLB_PAR)
        printk("23: Exception was caused by an SLB parity error (may not be "
               "recoverable). This condition could occur if the "
               "effective segment ID (ESID) fields of two or more SLB "
               "entries contain the same value.\n");

    return 0; /* for now let's not recover */
}
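
[Editor's note] The bit numbers printed above use IBM (MSB-first) numbering, so SRR1 bit 42 of a 64-bit register is (1 << (63 - 42)) in conventional LSB-first terms, and the two-bit cause field at bits 42:43 can be extracted with a shift and mask. A standalone sketch of that decode; the cause values are taken from the printed messages (0b01/0b10/0b11), not copied from the Xen headers:

#include <stdio.h>
#include <stdint.h>

/* IBM bit b of a 64-bit register == conventional bit (63 - b). */
#define IBM_BIT(b) (1ULL << (63 - (b)))

int main(void)
{
    /* Hypothetical SRR1 image: cause field (bits 42:43) set to 0b01. */
    uint64_t srr1 = IBM_BIT(43);

    unsigned int cause = (unsigned int)((srr1 >> (63 - 43)) & 0x3);
    switch (cause) {
    case 1: printf("0b01: SLB parity error (I-fetch)\n"); break;
    case 2: printf("0b10: TLB parity error (I-fetch)\n"); break;
    case 3: printf("0b11: UE on I-fetch TLB tablewalk\n"); break;
    default: printf("no machine-check cause encoded\n"); break;
    }
    return 0;
}
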