ia64/xen-unstable

view xen/arch/powerpc/powerpc64/ppc970.c @ 12923:489e4d09ffb7

[XEN][POWERPC] Consistency with log vs. order
We use "log" for a log2 value in bytes; "order" is a log2 value in units of pages, so:
order = log - PAGE_SHIFT
It is confusing, but more so if we are not consistent.
Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Fri Sep 29 09:53:39 2006 -0400 (2006-09-29)
parents 4da585fb62f9
children 1c996041fcde
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Copyright (C) IBM Corp. 2005, 2006
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 * Jimi Xenidis <jimix@watson.ibm.com>
20 * Amos Waterland <apw@us.ibm.com>
21 */
23 #include <xen/config.h>
24 #include <xen/types.h>
25 #include <xen/mm.h>
26 #include <xen/sched.h>
27 #include <xen/lib.h>
28 #include <asm/time.h>
29 #include <asm/current.h>
30 #include <asm/powerpc64/procarea.h>
31 #include <asm/powerpc64/processor.h>
32 #include <asm/powerpc64/ppc970-hid.h>
33 #include "scom.h"
35 #undef DEBUG
36 #undef SERIALIZE
/* Cache geometry for the PPC970: 128-byte (1 << 7) lines for both the
 * d-cache and the i-cache, hence the matching log values of 7. */
struct cpu_caches cpu_caches = {
    .dline_size = 0x80,                 /* d-cache line size in bytes */
    .log_dline_size = 7,                /* log2 of dline_size */
    .dlines_per_page = PAGE_SIZE >> 7,  /* d-cache lines covering one page */
    .iline_size = 0x80,                 /* i-cache line size in bytes */
    .log_iline_size = 7,                /* log2 of iline_size */
    .ilines_per_page = PAGE_SIZE >> 7,  /* i-cache lines covering one page */
};
/* One supported RMA (Real Mode Area) size and its HID4 RMLR encoding.
 * The RMLR value is split across two HID4 bitfields (bit 0 and bits 1:2). */
struct rma_settings {
    int log;        /* log2 of the RMA size in bytes */
    int rmlr_0;     /* value for the HID4 rmlr_0 field */
    int rmlr_1_2;   /* value for the HID4 rmlr_1_2 field */
};
/* RMA sizes supported by the 970, smallest first; entry 0 is the default
 * picked by cpu_default_rma_order_pages(). */
static struct rma_settings rma_logs[] = {
    { .log = 26, .rmlr_0 = 0, .rmlr_1_2 = 3, }, /* 64 MB */
    { .log = 27, .rmlr_0 = 1, .rmlr_1_2 = 3, }, /* 128 MB */
    { .log = 28, .rmlr_0 = 1, .rmlr_1_2 = 0, }, /* 256 MB */
    { .log = 30, .rmlr_0 = 0, .rmlr_1_2 = 2, }, /* 1 GB */
    { .log = 34, .rmlr_0 = 0, .rmlr_1_2 = 1, }, /* 16 GB */
    { .log = 38, .rmlr_0 = 0, .rmlr_1_2 = 0, }, /* 256 GB */
};
/* Supported large-page sizes, as log2 of the size in bytes. */
static uint log_large_page_sizes[] = {
    4 + 20, /* (1 << 4) == 16M */
};
66 static struct rma_settings *cpu_find_rma(unsigned int log)
67 {
68 int i;
70 for (i = 0; i < ARRAY_SIZE(rma_logs); i++) {
71 if (rma_logs[i].log == log)
72 return &rma_logs[i];
73 }
74 return NULL;
75 }
77 unsigned int cpu_default_rma_order_pages(void)
78 {
79 return rma_logs[0].log - PAGE_SHIFT;
80 }
82 int cpu_rma_valid(unsigned int order)
83 {
84 return cpu_find_rma(order + PAGE_SHIFT) != NULL;
85 }
87 unsigned int cpu_large_page_orders(uint *sizes, uint max)
88 {
89 uint i = 0;
91 while (i < max && i < ARRAY_SIZE(log_large_page_sizes)) {
92 sizes[i] = log_large_page_sizes[i] - PAGE_SHIFT;
93 ++i;
94 }
96 return i;
97 }
99 unsigned int cpu_extent_order(void)
100 {
101 return log_large_page_sizes[0] - PAGE_SHIFT;
102 }
105 /* This is more a platform thing than a CPU thing, but we only have
106 * one platform now */
107 int cpu_io_mfn(ulong mfn)
108 {
109 /* totally cheating */
110 if (mfn >= (2UL << (30 - PAGE_SHIFT)) && /* 2GiB */
111 mfn < (4UL << (30 - PAGE_SHIFT))) /* 4GiB */
112 return 1;
114 return 0;
115 }
/* Boot-CPU snapshot of the HID registers, indexed by HID number (only
 * slots 0, 1, 4 and 5 are used); secondaries are initialized from it. */
static u64 cpu0_hids[6];
/* HIOR value applied to every CPU; forced to 0 in cpu_initialize(). */
static u64 cpu0_hior;
120 void cpu_initialize(int cpuid)
121 {
122 union hid0 hid0;
123 union hid1 hid1;
124 union hid4 hid4;
125 union hid5 hid5;
127 if (cpuid == 0) {
128 /* we can assume that these are sane to start with. We
129 * _do_not_ store the results in case we want to mess with them
130 * on a per-cpu basis later. */
131 cpu0_hids[0] = mfhid0();
132 cpu0_hids[1] = mfhid1();
133 cpu0_hids[4] = mfhid4();
134 cpu0_hids[5] = mfhid5();
135 cpu0_hior = 0;
136 }
138 hid0.word = cpu0_hids[0];
139 hid1.word = cpu0_hids[1];
140 hid4.word = cpu0_hids[4];
141 hid5.word = cpu0_hids[5];
143 /* This is SMP safe because the compiler must use r13 for it. */
144 parea = global_cpu_table[cpuid];
145 ASSERT(parea != NULL);
147 mthsprg0((ulong)parea); /* now ready for exceptions */
149 printk("CPU[PIR:%u IPI:%u Logical:%u] Hello World!\n",
150 mfpir(), hard_smp_processor_id(), smp_processor_id());
152 #ifdef DEBUG
153 {
154 ulong r1, r2;
156 asm volatile ("mr %0, 1" : "=r" (r1));
157 asm volatile ("mr %0, 2" : "=r" (r2));
158 printk(" SP = %lx TOC = %lx\n", r1, r2);
159 }
160 #endif
162 /* Set decrementers for 1 second to keep them out of the way during
163 * intialization. */
164 /* XXX make tickless */
165 mtdec(timebase_freq);
166 mthdec(timebase_freq);
168 hid0.bits.nap = 1; /* NAP */
169 hid0.bits.dpm = 1; /* Dynamic Power Management */
170 hid0.bits.nhr = 1; /* Not Hard Reset */
171 hid0.bits.hdice_en = 1; /* enable HDEC */
172 hid0.bits.en_therm = 0; /* ! Enable ext thermal ints */
173 /* only debug Xen should activate ATTN */
174 hid0.bits.en_attn = 1; /* Enable attn instruction */
175 hid0.bits.en_mck = 1; /* Enable external machine check interrupts */
177 #ifdef SERIALIZE
178 hid0.bits.one_ppc = 1;
179 hid0.bits.isync_sc = 1;
180 hid0.bits.inorder = 1;
181 /* may not want these */
182 hid0.bits.do_single = 1;
183 hid0.bits.ser-gp = 1;
184 #endif
186 mthid0(hid0.word);
188 hid1.bits.bht_pm = 7; /* branch history table prediction mode */
189 hid1.bits.en_ls = 1; /* enable link stack */
191 hid1.bits.en_cc = 1; /* enable count cache */
192 hid1.bits.en_ic = 1; /* enable inst cache */
194 hid1.bits.pf_mode = 2; /* prefetch mode */
196 hid1.bits.en_if_cach = 1; /* i-fetch cacheability control */
197 hid1.bits.en_ic_rec = 1; /* i-cache parity error recovery */
198 hid1.bits.en_id_rec = 1; /* i-dir parity error recovery */
199 hid1.bits.en_er_rec = 1; /* i-ERAT parity error recovery */
201 hid1.bits.en_sp_itw = 1; /* En speculative tablewalks */
202 mthid1(hid1.word);
204 /* no changes to hid4 but we want to make sure that secondaries
205 * are sane */
206 hid4.bits.lg_pg_dis = 0; /* make sure we enable large pages */
207 mthid4(hid4.word);
209 hid5.bits.DC_mck = 1; /* Machine check enabled for dcache errors */
210 hid5.bits.DCBZ_size = 0; /* make dcbz size 32 bytes */
211 hid5.bits.DCBZ32_ill = 0; /* make dzbz 32byte illeagal */
212 mthid5(hid5.word);
214 #ifdef DEBUG
215 printk("hid0 0x%016lx\n"
216 "hid1 0x%016lx\n"
217 "hid4 0x%016lx\n"
218 "hid5 0x%016lx\n",
219 mfhid0(), mfhid1(), mfhid4(), mfhid5());
220 #endif
222 /* Make sure firmware has not left this dirty */
223 mthior(cpu0_hior);
225 /* some machine check goodness */
226 /* save this for checkstop processing */
227 if (cpuid == 0)
228 *mck_good_hid4 = hid4.word;
230 if (mfpir() > NR_CPUS)
231 panic("we do not expect a processor to have a PIR (%u) "
232 "to be larger that NR_CPUS(%u)\n",
233 mfpir(), NR_CPUS);
235 cpu_scom_init();
237 /* initialize the SLB */
238 #ifdef DEBUG
239 dump_segments(1);
240 #endif
241 flush_segments();
242 local_flush_tlb();
243 }
/* Compute the per-domain HID4 value for a new vcpu, starting from the
 * current hardware HID4 and filling in partitioning (LPID), RMA base
 * (RMOR) and RMA size (RMLR) fields.  The result is cached in the vcpu
 * and reloaded on context switch by load_cpu_sprs(). */
void cpu_init_vcpu(struct vcpu *v)
{
    struct domain *d = v->domain;
    union hid4 hid4;
    struct rma_settings *rma_settings;

    hid4.word = mfhid4();

    hid4.bits.lpes_0 = 0; /* external exceptions set MSR_HV=1 */
    hid4.bits.lpes_1 = 1; /* RMA applies */

    /* RMOR holds the RMA base address shifted by 26 (64MB units). */
    hid4.bits.rmor_0_15 = page_to_maddr(d->arch.rma_page) >> 26;

    /* The domain id becomes the LPID, split across two HID4 fields:
     * low 2 bits and the next 4 bits. */
    hid4.bits.lpid_0_1 = d->domain_id & 3;
    hid4.bits.lpid_2_5 = (d->domain_id >> 2) & 0xf;

    /* The domain's RMA order (pages) must match a supported RMA size. */
    rma_settings = cpu_find_rma(d->arch.rma_order + PAGE_SHIFT);
    ASSERT(rma_settings != NULL);
    hid4.bits.rmlr_0 = rma_settings->rmlr_0;
    hid4.bits.rmlr_1_2 = rma_settings->rmlr_1_2;

    v->arch.cpu.hid4.word = hid4.word;
}
/* Nothing to save on context switch: HID4 is computed once per domain
 * at vcpu creation (cpu_init_vcpu) and never changes afterwards. */
void save_cpu_sprs(struct vcpu *v)
{
}
/* Restore per-vcpu SPR state on context switch: only HID4 (the cached
 * partitioning/RMA configuration from cpu_init_vcpu) needs reloading. */
void load_cpu_sprs(struct vcpu *v)
{
    mthid4(v->arch.cpu.hid4.word);
}