ia64/xen-unstable

view xen/arch/powerpc/powerpc64/ppc970.c @ 14237:eceb9ccd84a8

[POWERPC][XEN] Introduce "platform" abstraction to describe the IO hole.
Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Hollis Blanchard <hollisb@us.ibm.com>
date Fri Mar 02 17:06:50 2007 -0600 (2007-03-02)
parents d1f053ff43d2
children
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Copyright (C) IBM Corp. 2005, 2006
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 * Jimi Xenidis <jimix@watson.ibm.com>
20 * Amos Waterland <apw@us.ibm.com>
21 */
23 #include <xen/config.h>
24 #include <xen/types.h>
25 #include <xen/mm.h>
26 #include <xen/sched.h>
27 #include <xen/lib.h>
28 #include <asm/time.h>
29 #include <asm/current.h>
30 #include <asm/powerpc64/procarea.h>
31 #include <asm/powerpc64/processor.h>
32 #include <asm/powerpc64/ppc970-hid.h>
33 #include "scom.h"
35 #undef DEBUG
36 #undef SERIALIZE
/* PPC970 cache geometry: 128-byte cache lines for both I and D sides,
 * 64 KiB instruction cache.  Consumed by cache-maintenance code such as
 * cpu_flush_icache() below. */
struct cpu_caches cpu_caches = {
    .dline_size = 0x80,                 /* d-cache line: 128 bytes */
    .log_dline_size = 7,                /* log2(0x80) */
    .dlines_per_page = PAGE_SIZE >> 7,
    .isize = (64 << 10),                /* 64 KiB */
    .iline_size = 0x80,                 /* i-cache line: 128 bytes */
    .log_iline_size = 7,                /* log2(0x80) */
    .ilines_per_page = PAGE_SIZE >> 7,
};
/* Invalidate the entire instruction cache.
 *
 * Temporarily sets HID1[en_icbi] so that icbi performs a forced match on
 * the whole line regardless of effective-to-real translation, then walks
 * every i-cache line offset.  Runs with interrupts disabled so nothing
 * can observe or disturb the modified HID1 state. */
void cpu_flush_icache(void)
{
    union hid1 hid1;
    ulong flags;
    ulong ea;

    local_irq_save(flags);

    /* uses special processor mode that forces a real address match on
     * the whole line */
    hid1.word = mfhid1();
    hid1.bits.en_icbi = 1;
    mthid1(hid1.word);

    /* Touch one icbi per line across the full 64 KiB i-cache. */
    for (ea = 0; ea < cpu_caches.isize; ea += cpu_caches.iline_size)
        icbi(ea);

    sync();

    /* Restore normal icbi behavior. */
    hid1.bits.en_icbi = 0;
    mthid1(hid1.word);

    local_irq_restore(flags);
}
/* Encoding of one supported RMA (Real Mode Area) size.
 * The RMLS field is split across non-adjacent HID4 bit ranges, hence
 * the two separate members (see cpu_init_vcpu()). */
struct rma_settings {
    int log;       /* log2 of the RMA size in bytes */
    int rmlr_0;    /* RMLS bit 0 (HID4 field rmlr_0) */
    int rmlr_1_2;  /* RMLS bits 1:2 (HID4 field rmlr_1_2) */
};
/* Table of RMA sizes supported by the 970, smallest first --
 * cpu_default_rma_order_pages() relies on entry 0 being the minimum. */
static struct rma_settings rma_logs[] = {
    { .log = 26, .rmlr_0 = 0, .rmlr_1_2 = 3, }, /* 64 MB */
    { .log = 27, .rmlr_0 = 1, .rmlr_1_2 = 3, }, /* 128 MB */
    { .log = 28, .rmlr_0 = 1, .rmlr_1_2 = 0, }, /* 256 MB */
    { .log = 30, .rmlr_0 = 0, .rmlr_1_2 = 2, }, /* 1 GB */
    { .log = 34, .rmlr_0 = 0, .rmlr_1_2 = 1, }, /* 16 GB */
    { .log = 38, .rmlr_0 = 0, .rmlr_1_2 = 0, }, /* 256 GB */
};
/* log2 of each supported large page size; the 970 supports one: 16 MB. */
static uint log_large_page_sizes[] = {
    4 + 20, /* (1 << 4) == 16M */
};
94 static struct rma_settings *cpu_find_rma(unsigned int log)
95 {
96 int i;
98 for (i = 0; i < ARRAY_SIZE(rma_logs); i++) {
99 if (rma_logs[i].log == log)
100 return &rma_logs[i];
101 }
102 return NULL;
103 }
/* Default RMA size, expressed as an order of pages: the smallest
 * supported size (first rma_logs entry, 64 MB). */
unsigned int cpu_default_rma_order_pages(void)
{
    return rma_logs[0].log - PAGE_SHIFT;
}
110 int cpu_rma_valid(unsigned int order)
111 {
112 return cpu_find_rma(order + PAGE_SHIFT) != NULL;
113 }
115 unsigned int cpu_large_page_orders(uint *sizes, uint max)
116 {
117 uint i = 0;
119 while (i < max && i < ARRAY_SIZE(log_large_page_sizes)) {
120 sizes[i] = log_large_page_sizes[i] - PAGE_SHIFT;
121 ++i;
122 }
124 return i;
125 }
/* Memory extent order: the (single) large page size as a page order. */
unsigned int cpu_extent_order(void)
{
    return log_large_page_sizes[0] - PAGE_SHIFT;
}
/* Hardware threads per core for the given cpu.
 * The PPC970 is single-threaded, so this is always 1. */
int cpu_threads(int cpuid)
{
    return 1;
}
/* Snapshot of cpu 0's firmware-provided HID register values, indexed by
 * HID register number (only 0, 1, 4 and 5 are used on the 970).
 * Secondaries are programmed from this snapshot in cpu_initialize(). */
static u64 cpu0_hids[6];
/* HIOR (interrupt vector base) applied to every cpu; zeroed at boot. */
static u64 cpu0_hior;
141 void cpu_initialize(int cpuid)
142 {
143 union hid0 hid0;
144 union hid1 hid1;
145 union hid4 hid4;
146 union hid5 hid5;
148 if (cpuid == 0) {
149 /* we can assume that these are sane to start with. We
150 * _do_not_ store the results in case we want to mess with them
151 * on a per-cpu basis later. */
152 cpu0_hids[0] = mfhid0();
153 cpu0_hids[1] = mfhid1();
154 cpu0_hids[4] = mfhid4();
155 cpu0_hids[5] = mfhid5();
156 cpu0_hior = 0;
157 }
159 hid0.word = cpu0_hids[0];
160 hid1.word = cpu0_hids[1];
161 hid4.word = cpu0_hids[4];
162 hid5.word = cpu0_hids[5];
164 /* This is SMP safe because the compiler must use r13 for it. */
165 parea = global_cpu_table[cpuid];
166 ASSERT(parea != NULL);
168 mthsprg0((ulong)parea); /* now ready for exceptions */
170 printk("CPU[PIR:%u IPI:%u Logical:%u] Hello World!\n",
171 mfpir(), hard_smp_processor_id(), smp_processor_id());
173 #ifdef DEBUG
174 {
175 ulong r1, r2;
177 asm volatile ("mr %0, 1" : "=r" (r1));
178 asm volatile ("mr %0, 2" : "=r" (r2));
179 printk(" SP = %lx TOC = %lx\n", r1, r2);
180 }
181 #endif
183 /* Set decrementers for 1 second to keep them out of the way during
184 * intialization. */
185 /* XXX make tickless */
186 mtdec(timebase_freq);
187 mthdec(timebase_freq);
189 /* FIXME Do not set the NAP bit in HID0 until we have had a chance
190 * to audit the safe halt and idle loop code. */
191 hid0.bits.nap = 0; /* NAP */
192 hid0.bits.dpm = 1; /* Dynamic Power Management */
194 hid0.bits.nhr = 1; /* Not Hard Reset */
195 hid0.bits.hdice_en = 1; /* enable HDEC */
196 hid0.bits.en_therm = 0; /* ! Enable ext thermal ints */
197 /* only debug Xen should activate ATTN */
198 hid0.bits.en_attn = 1; /* Enable attn instruction */
199 hid0.bits.en_mck = 1; /* Enable external machine check interrupts */
201 #ifdef SERIALIZE
202 hid0.bits.one_ppc = 1;
203 hid0.bits.isync_sc = 1;
204 hid0.bits.inorder = 1;
205 /* may not want these */
206 hid0.bits.do_single = 1;
207 hid0.bits.ser-gp = 1;
208 #endif
210 mthid0(hid0.word);
212 hid1.bits.bht_pm = 7; /* branch history table prediction mode */
213 hid1.bits.en_ls = 1; /* enable link stack */
215 hid1.bits.en_cc = 1; /* enable count cache */
216 hid1.bits.en_ic = 1; /* enable inst cache */
218 hid1.bits.pf_mode = 2; /* prefetch mode */
220 hid1.bits.en_if_cach = 1; /* i-fetch cacheability control */
221 hid1.bits.en_ic_rec = 1; /* i-cache parity error recovery */
222 hid1.bits.en_id_rec = 1; /* i-dir parity error recovery */
223 hid1.bits.en_er_rec = 1; /* i-ERAT parity error recovery */
225 hid1.bits.en_sp_itw = 1; /* En speculative tablewalks */
226 mthid1(hid1.word);
228 /* no changes to hid4 but we want to make sure that secondaries
229 * are sane */
230 hid4.bits.lg_pg_dis = 0; /* make sure we enable large pages */
231 mthid4(hid4.word);
233 hid5.bits.DC_mck = 1; /* Machine check enabled for dcache errors */
234 hid5.bits.DCBZ_size = 0; /* make dcbz size 32 bytes */
235 hid5.bits.DCBZ32_ill = 0; /* make dzbz 32byte illeagal */
236 mthid5(hid5.word);
238 #ifdef DEBUG
239 printk("hid0 0x%016lx\n"
240 "hid1 0x%016lx\n"
241 "hid4 0x%016lx\n"
242 "hid5 0x%016lx\n",
243 mfhid0(), mfhid1(), mfhid4(), mfhid5());
244 #endif
246 /* Make sure firmware has not left this dirty */
247 mthior(cpu0_hior);
249 /* some machine check goodness */
250 /* save this for checkstop processing */
251 if (cpuid == 0)
252 *mck_good_hid4 = hid4.word;
254 if (mfpir() > NR_CPUS)
255 panic("we do not expect a processor to have a PIR (%u) "
256 "to be larger that NR_CPUS(%u)\n",
257 mfpir(), NR_CPUS);
259 cpu_scom_init();
261 /* initialize the SLB */
262 #ifdef DEBUG
263 dump_segments(1);
264 #endif
265 flush_segments();
266 local_flush_tlb();
267 }
/* Compute the per-domain HID4 value for a new vcpu.
 *
 * On the 970, HID4 carries the logical-partitioning controls: LPES
 * (external-interrupt routing), RMOR (real mode area origin), RMLS
 * (real mode area size) and LPID (partition id).  The computed word is
 * cached in v->arch.cpu.hid4 and written to the register by
 * load_cpu_sprs() at context switch. */
void cpu_init_vcpu(struct vcpu *v)
{
    struct domain *d = v->domain;
    union hid4 hid4;
    struct rma_settings *rma_settings;

    hid4.word = mfhid4();

    hid4.bits.lpes_0 = 0; /* external exceptions set MSR_HV=1 */
    hid4.bits.lpes_1 = 1; /* RMA applies */

    /* RMOR holds the RMA's machine address shifted right by 26 bits,
     * i.e. the RMA must be 64 MB (2^26) aligned. */
    hid4.bits.rmor_0_15 = page_to_maddr(d->arch.rma_page) >> 26;

    /* The 6-bit LPID is split across two HID4 fields: low 2 bits,
     * then the next 4 bits. */
    hid4.bits.lpid_0_1 = d->domain_id & 3;
    hid4.bits.lpid_2_5 = (d->domain_id >> 2) & 0xf;

    /* The domain's RMA size was validated at creation, so the lookup
     * must succeed. */
    rma_settings = cpu_find_rma(d->arch.rma_order + PAGE_SHIFT);
    ASSERT(rma_settings != NULL);
    hid4.bits.rmlr_0 = rma_settings->rmlr_0;
    hid4.bits.rmlr_1_2 = rma_settings->rmlr_1_2;

    v->arch.cpu.hid4.word = hid4.word;
}
/* Save implementation-specific SPRs when switching a vcpu out.
 * Intentionally empty on the 970: the only per-vcpu SPR handled here
 * is HID4, which is constant after domain creation (see cpu_init_vcpu)
 * and therefore needs no save. */
void save_cpu_sprs(struct vcpu *v)
{
    /* HID4 is initialized with a per-domain value at domain creation time, and
     * does not change after that. */
}
/* Load the incoming vcpu's SPRs when switching it in: restore the
 * per-domain HID4 (LPID/RMA partitioning state) computed by
 * cpu_init_vcpu(). */
void load_cpu_sprs(struct vcpu *v)
{
    mthid4(v->arch.cpu.hid4.word);
}