ia64/xen-unstable

view xen/arch/powerpc/powerpc64/ppc970.c @ 12930:1c996041fcde

[XEN][POWERPC] Add Function to completely flush the I-Cache for a processor
Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Mon Oct 02 11:07:54 2006 -0400 (2006-10-02)
parents 489e4d09ffb7
children 7b6f0a4d5cdd
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Copyright (C) IBM Corp. 2005, 2006
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 * Jimi Xenidis <jimix@watson.ibm.com>
20 * Amos Waterland <apw@us.ibm.com>
21 */
23 #include <xen/config.h>
24 #include <xen/types.h>
25 #include <xen/mm.h>
26 #include <xen/sched.h>
27 #include <xen/lib.h>
28 #include <asm/time.h>
29 #include <asm/current.h>
30 #include <asm/powerpc64/procarea.h>
31 #include <asm/powerpc64/processor.h>
32 #include <asm/powerpc64/ppc970-hid.h>
33 #include "scom.h"
35 #undef DEBUG
36 #undef SERIALIZE
/* Geometry of the PPC970 L1 caches, consumed by cache maintenance code
 * (e.g. cpu_flush_icache() below): 128-byte lines on both the I and D
 * sides, 64 KiB of instruction cache. */
struct cpu_caches cpu_caches = {
    .dline_size = 0x80,                 /* D-cache line: 128 bytes */
    .log_dline_size = 7,                /* log2(128) */
    .dlines_per_page = PAGE_SIZE >> 7,  /* D-cache lines per page */
    .isize = (64 << 10), /* 64 KiB */
    .iline_size = 0x80,                 /* I-cache line: 128 bytes */
    .log_iline_size = 7,                /* log2(128) */
    .ilines_per_page = PAGE_SIZE >> 7,  /* I-cache lines per page */
};
49 void cpu_flush_icache(void)
50 {
51 union hid1 hid1;
52 ulong flags;
53 ulong ra;
55 local_irq_save(flags);
57 /* uses special processor mode that forces a real address match */
58 hid1.word = mfhid1();
59 hid1.bits.en_icbi = 1;
60 mthid1(hid1.word);
62 for (ra = 0; ra < cpu_caches.isize; ra += cpu_caches.iline_size)
63 icbi(ra);
65 sync();
67 hid1.bits.en_icbi = 0;
68 mthid1(hid1.word);
70 local_irq_save(flags);
71 }
/* One supported Real Mode Area (RMA) size: its log2 in bytes plus the
 * HID4 RMLR field encodings used to program that size into the CPU
 * (see cpu_init_vcpu() below). */
struct rma_settings {
    int log;        /* log2 of the RMA size in bytes */
    int rmlr_0;     /* value for HID4 rmlr bit 0 */
    int rmlr_1_2;   /* value for HID4 rmlr bits 1:2 */
};
/* The RMA sizes the 970 supports, smallest first; entry 0 doubles as the
 * default in cpu_default_rma_order_pages(). */
static struct rma_settings rma_logs[] = {
    { .log = 26, .rmlr_0 = 0, .rmlr_1_2 = 3, }, /* 64 MB */
    { .log = 27, .rmlr_0 = 1, .rmlr_1_2 = 3, }, /* 128 MB */
    { .log = 28, .rmlr_0 = 1, .rmlr_1_2 = 0, }, /* 256 MB */
    { .log = 30, .rmlr_0 = 0, .rmlr_1_2 = 2, }, /* 1 GB */
    { .log = 34, .rmlr_0 = 0, .rmlr_1_2 = 1, }, /* 16 GB */
    { .log = 38, .rmlr_0 = 0, .rmlr_1_2 = 0, }, /* 256 GB */
};
/* Large page sizes supported, expressed as log2 of the size in bytes.
 * The 970 has a single large page size of 16 MiB. */
static uint log_large_page_sizes[] = {
    4 + 20, /* (1 << 4) == 16M */
};
93 static struct rma_settings *cpu_find_rma(unsigned int log)
94 {
95 int i;
97 for (i = 0; i < ARRAY_SIZE(rma_logs); i++) {
98 if (rma_logs[i].log == log)
99 return &rma_logs[i];
100 }
101 return NULL;
102 }
104 unsigned int cpu_default_rma_order_pages(void)
105 {
106 return rma_logs[0].log - PAGE_SHIFT;
107 }
109 int cpu_rma_valid(unsigned int order)
110 {
111 return cpu_find_rma(order + PAGE_SHIFT) != NULL;
112 }
114 unsigned int cpu_large_page_orders(uint *sizes, uint max)
115 {
116 uint i = 0;
118 while (i < max && i < ARRAY_SIZE(log_large_page_sizes)) {
119 sizes[i] = log_large_page_sizes[i] - PAGE_SHIFT;
120 ++i;
121 }
123 return i;
124 }
126 unsigned int cpu_extent_order(void)
127 {
128 return log_large_page_sizes[0] - PAGE_SHIFT;
129 }
132 /* This is more a platform thing than a CPU thing, but we only have
133 * one platform now */
134 int cpu_io_mfn(ulong mfn)
135 {
136 /* totally cheating */
137 if (mfn >= (2UL << (30 - PAGE_SHIFT)) && /* 2GiB */
138 mfn < (4UL << (30 - PAGE_SHIFT))) /* 4GiB */
139 return 1;
141 return 0;
142 }
/* HID register values sampled on CPU 0 during its cpu_initialize() pass;
 * secondary CPUs start from these so all processors run with an identical
 * configuration.  Indexed by HID number; only slots 0, 1, 4, 5 are used. */
static u64 cpu0_hids[6];
static u64 cpu0_hior; /* HIOR (interrupt prefix); forced to 0 at boot */
147 void cpu_initialize(int cpuid)
148 {
149 union hid0 hid0;
150 union hid1 hid1;
151 union hid4 hid4;
152 union hid5 hid5;
154 if (cpuid == 0) {
155 /* we can assume that these are sane to start with. We
156 * _do_not_ store the results in case we want to mess with them
157 * on a per-cpu basis later. */
158 cpu0_hids[0] = mfhid0();
159 cpu0_hids[1] = mfhid1();
160 cpu0_hids[4] = mfhid4();
161 cpu0_hids[5] = mfhid5();
162 cpu0_hior = 0;
163 }
165 hid0.word = cpu0_hids[0];
166 hid1.word = cpu0_hids[1];
167 hid4.word = cpu0_hids[4];
168 hid5.word = cpu0_hids[5];
170 /* This is SMP safe because the compiler must use r13 for it. */
171 parea = global_cpu_table[cpuid];
172 ASSERT(parea != NULL);
174 mthsprg0((ulong)parea); /* now ready for exceptions */
176 printk("CPU[PIR:%u IPI:%u Logical:%u] Hello World!\n",
177 mfpir(), hard_smp_processor_id(), smp_processor_id());
179 #ifdef DEBUG
180 {
181 ulong r1, r2;
183 asm volatile ("mr %0, 1" : "=r" (r1));
184 asm volatile ("mr %0, 2" : "=r" (r2));
185 printk(" SP = %lx TOC = %lx\n", r1, r2);
186 }
187 #endif
189 /* Set decrementers for 1 second to keep them out of the way during
190 * intialization. */
191 /* XXX make tickless */
192 mtdec(timebase_freq);
193 mthdec(timebase_freq);
195 hid0.bits.nap = 1; /* NAP */
196 hid0.bits.dpm = 1; /* Dynamic Power Management */
197 hid0.bits.nhr = 1; /* Not Hard Reset */
198 hid0.bits.hdice_en = 1; /* enable HDEC */
199 hid0.bits.en_therm = 0; /* ! Enable ext thermal ints */
200 /* only debug Xen should activate ATTN */
201 hid0.bits.en_attn = 1; /* Enable attn instruction */
202 hid0.bits.en_mck = 1; /* Enable external machine check interrupts */
204 #ifdef SERIALIZE
205 hid0.bits.one_ppc = 1;
206 hid0.bits.isync_sc = 1;
207 hid0.bits.inorder = 1;
208 /* may not want these */
209 hid0.bits.do_single = 1;
210 hid0.bits.ser-gp = 1;
211 #endif
213 mthid0(hid0.word);
215 hid1.bits.bht_pm = 7; /* branch history table prediction mode */
216 hid1.bits.en_ls = 1; /* enable link stack */
218 hid1.bits.en_cc = 1; /* enable count cache */
219 hid1.bits.en_ic = 1; /* enable inst cache */
221 hid1.bits.pf_mode = 2; /* prefetch mode */
223 hid1.bits.en_if_cach = 1; /* i-fetch cacheability control */
224 hid1.bits.en_ic_rec = 1; /* i-cache parity error recovery */
225 hid1.bits.en_id_rec = 1; /* i-dir parity error recovery */
226 hid1.bits.en_er_rec = 1; /* i-ERAT parity error recovery */
228 hid1.bits.en_sp_itw = 1; /* En speculative tablewalks */
229 mthid1(hid1.word);
231 /* no changes to hid4 but we want to make sure that secondaries
232 * are sane */
233 hid4.bits.lg_pg_dis = 0; /* make sure we enable large pages */
234 mthid4(hid4.word);
236 hid5.bits.DC_mck = 1; /* Machine check enabled for dcache errors */
237 hid5.bits.DCBZ_size = 0; /* make dcbz size 32 bytes */
238 hid5.bits.DCBZ32_ill = 0; /* make dzbz 32byte illeagal */
239 mthid5(hid5.word);
241 #ifdef DEBUG
242 printk("hid0 0x%016lx\n"
243 "hid1 0x%016lx\n"
244 "hid4 0x%016lx\n"
245 "hid5 0x%016lx\n",
246 mfhid0(), mfhid1(), mfhid4(), mfhid5());
247 #endif
249 /* Make sure firmware has not left this dirty */
250 mthior(cpu0_hior);
252 /* some machine check goodness */
253 /* save this for checkstop processing */
254 if (cpuid == 0)
255 *mck_good_hid4 = hid4.word;
257 if (mfpir() > NR_CPUS)
258 panic("we do not expect a processor to have a PIR (%u) "
259 "to be larger that NR_CPUS(%u)\n",
260 mfpir(), NR_CPUS);
262 cpu_scom_init();
264 /* initialize the SLB */
265 #ifdef DEBUG
266 dump_segments(1);
267 #endif
268 flush_segments();
269 local_flush_tlb();
270 }
/* Compute the per-domain HID4 image for a vcpu: RMA origin and size,
 * logical partition ID, and exception routing.  The result is stashed in
 * v->arch.cpu.hid4 and written to hardware later by load_cpu_sprs(). */
void cpu_init_vcpu(struct vcpu *v)
{
    struct domain *d = v->domain;
    union hid4 hid4;
    struct rma_settings *rma_settings;

    /* Start from the current hardware value; only partition-related
     * fields are modified below. */
    hid4.word = mfhid4();

    hid4.bits.lpes_0 = 0; /* external exceptions set MSR_HV=1 */
    hid4.bits.lpes_1 = 1; /* RMA applies */

    /* RMA origin: physical address of the domain's RMA, in 64 MiB units
     * (hence the >> 26). */
    hid4.bits.rmor_0_15 = page_to_maddr(d->arch.rma_page) >> 26;

    /* Split the domain ID across the two LPID fields: low 2 bits and
     * the next 4 bits. */
    hid4.bits.lpid_0_1 = d->domain_id & 3;
    hid4.bits.lpid_2_5 = (d->domain_id >> 2) & 0xf;

    /* Encode the RMA size; the order was validated at domain creation,
     * so a lookup failure here is a programming error. */
    rma_settings = cpu_find_rma(d->arch.rma_order + PAGE_SHIFT);
    ASSERT(rma_settings != NULL);
    hid4.bits.rmlr_0 = rma_settings->rmlr_0;
    hid4.bits.rmlr_1_2 = rma_settings->rmlr_1_2;

    v->arch.cpu.hid4.word = hid4.word;
}
/* Save per-vcpu SPRs on context switch out.  Intentionally empty: */
void save_cpu_sprs(struct vcpu *v)
{
    /* HID4 is initialized with a per-domain value at domain creation time, and
     * does not change after that. */
}
/* Load per-vcpu SPRs on context switch in: program the HID4 image
 * (RMA, LPID, exception routing) prepared by cpu_init_vcpu(). */
void load_cpu_sprs(struct vcpu *v)
{
    mthid4(v->arch.cpu.hid4.word);
}