direct-io.hg

view xen/arch/powerpc/powerpc64/ppc970.c @ 11526:ce9c34c049c5

[POWERPC][XEN] Track the Hard CPUID as configured by the FW

This patch correctly implements and supports hard_smp_processor_id().

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Mon Sep 18 09:23:51 2006 -0400 (2006-09-18)
parents a3762039dc23
children 4da585fb62f9
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Copyright (C) IBM Corp. 2005, 2006
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 * Jimi Xenidis <jimix@watson.ibm.com>
20 * Amos Waterland <apw@us.ibm.com>
21 */
23 #include <xen/config.h>
24 #include <xen/types.h>
25 #include <xen/mm.h>
26 #include <xen/sched.h>
27 #include <xen/lib.h>
28 #include <asm/time.h>
29 #include <asm/current.h>
30 #include <asm/powerpc64/procarea.h>
31 #include <asm/powerpc64/processor.h>
32 #include <asm/powerpc64/ppc970-hid.h>
34 #undef DEBUG
35 #undef SERIALIZE
/* Cache geometry of this CPU: 128-byte (2^7) lines for both the d-cache
 * and the i-cache, as the initializers below show. */
struct cpu_caches cpu_caches = {
    .dline_size = 0x80,                 /* d-cache line size in bytes */
    .log_dline_size = 7,                /* log2(dline_size) */
    .dlines_per_page = PAGE_SIZE >> 7,  /* d-cache lines per page */
    .iline_size = 0x80,                 /* i-cache line size in bytes */
    .log_iline_size = 7,                /* log2(iline_size) */
    .ilines_per_page = PAGE_SIZE >> 7,  /* i-cache lines per page */
};
/* One supported Real Mode Area (RMA) size: its log2 byte size together
 * with the split-field encoding written into HID4's RMLR bits (see
 * cpu_init_vcpu, which copies these into hid4.bits.rmlr_0/rmlr_1_2). */
struct rma_settings {
    int order;      /* log2 of the RMA size in bytes */
    int rmlr_0;     /* value for HID4 RMLR bit 0 */
    int rmlr_1_2;   /* value for HID4 RMLR bits 1:2 */
};
/* Table of every RMA size this CPU supports, with the matching RMLR
 * encodings.  Searched linearly by cpu_find_rma(); entry 0 (the
 * smallest) is the default reported by cpu_default_rma_order_pages(). */
static struct rma_settings rma_orders[] = {
    { .order = 26, .rmlr_0 = 0, .rmlr_1_2 = 3, }, /* 64 MB */
    { .order = 27, .rmlr_0 = 1, .rmlr_1_2 = 3, }, /* 128 MB */
    { .order = 28, .rmlr_0 = 1, .rmlr_1_2 = 0, }, /* 256 MB */
    { .order = 30, .rmlr_0 = 0, .rmlr_1_2 = 2, }, /* 1 GB */
    { .order = 34, .rmlr_0 = 0, .rmlr_1_2 = 1, }, /* 16 GB */
    { .order = 38, .rmlr_0 = 0, .rmlr_1_2 = 0, }, /* 256 GB */
};
/* log2 byte sizes of the large pages this CPU supports; currently only
 * one entry (16 MB).  Consumed by cpu_large_page_orders() and
 * cpu_extent_order(). */
static uint log_large_page_sizes[] = {
    4 + 20, /* (1 << 4) == 16M */
};
65 static struct rma_settings *cpu_find_rma(unsigned int order)
66 {
67 int i;
68 for (i = 0; i < ARRAY_SIZE(rma_orders); i++) {
69 if (rma_orders[i].order == order)
70 return &rma_orders[i];
71 }
72 return NULL;
73 }
/* Default RMA size for a new domain, expressed as a log2 page count:
 * the smallest supported RMA (rma_orders[0]) minus PAGE_SHIFT. */
unsigned int cpu_default_rma_order_pages(void)
{
    return rma_orders[0].order - PAGE_SHIFT;
}
/* Return non-zero iff @log (log2 of an RMA size in bytes) is one of the
 * sizes this CPU supports (i.e. present in rma_orders). */
int cpu_rma_valid(unsigned int log)
{
    return cpu_find_rma(log) != NULL;
}
85 unsigned int cpu_large_page_orders(uint *sizes, uint max)
86 {
87 uint i = 0;
89 while (i < max && i < ARRAY_SIZE(log_large_page_sizes)) {
90 sizes[i] = log_large_page_sizes[i] - PAGE_SHIFT;
91 ++i;
92 }
94 return i;
95 }
/* Memory extent order used for allocation: the first (only) supported
 * large-page size, expressed as a log2 page count. */
unsigned int cpu_extent_order(void)
{
    return log_large_page_sizes[0] - PAGE_SHIFT;
}
103 /* This is more a platform thing than a CPU thing, but we only have
104 * one platform now */
105 int cpu_io_mfn(ulong mfn)
106 {
107 /* totally cheating */
108 if (mfn >= (2UL << (30 - PAGE_SHIFT)) && /* 2GiB */
109 mfn < (4UL << (30 - PAGE_SHIFT))) /* 4GiB */
110 return 1;
112 return 0;
113 }
/* HID register values sampled on CPU 0 at boot (see cpu_initialize);
 * indexed by HID number, so slots 2 and 3 are unused.  Secondary CPUs
 * are programmed from these same values. */
static u64 cpu0_hids[6];
/* Value written to HIOR on every CPU; cleared to 0 on CPU 0 at boot. */
static u64 cpu0_hior;
118 void cpu_initialize(int cpuid)
119 {
120 union hid0 hid0;
121 union hid1 hid1;
122 union hid4 hid4;
123 union hid5 hid5;
125 if (cpuid == 0) {
126 /* we can assume that these are sane to start with. We
127 * _do_not_ store the results in case we want to mess with them
128 * on a per-cpu basis later. */
129 cpu0_hids[0] = mfhid0();
130 cpu0_hids[1] = mfhid1();
131 cpu0_hids[4] = mfhid4();
132 cpu0_hids[5] = mfhid5();
133 cpu0_hior = 0;
134 }
136 hid0.word = cpu0_hids[0];
137 hid1.word = cpu0_hids[1];
138 hid4.word = cpu0_hids[4];
139 hid5.word = cpu0_hids[5];
141 /* This is SMP safe because the compiler must use r13 for it. */
142 parea = global_cpu_table[cpuid];
143 ASSERT(parea != NULL);
145 mthsprg0((ulong)parea); /* now ready for exceptions */
147 printk("CPU[PIR:%u IPI:%u Logical:%u] Hello World!\n",
148 mfpir(), hard_smp_processor_id(), smp_processor_id());
150 #ifdef DEBUG
151 {
152 ulong r1, r2;
154 asm volatile ("mr %0, 1" : "=r" (r1));
155 asm volatile ("mr %0, 2" : "=r" (r2));
156 printk(" SP = %lx TOC = %lx\n", r1, r2);
157 }
158 #endif
160 /* Set decrementers for 1 second to keep them out of the way during
161 * intialization. */
162 /* XXX make tickless */
163 mtdec(timebase_freq);
164 mthdec(timebase_freq);
166 hid0.bits.nap = 1; /* NAP */
167 hid0.bits.dpm = 1; /* Dynamic Power Management */
168 hid0.bits.nhr = 1; /* Not Hard Reset */
169 hid0.bits.hdice_en = 1; /* enable HDEC */
170 hid0.bits.en_therm = 0; /* ! Enable ext thermal ints */
171 /* only debug Xen should activate ATTN */
172 hid0.bits.en_attn = 1; /* Enable attn instruction */
173 hid0.bits.en_mck = 1; /* Enable external machine check interrupts */
175 #ifdef SERIALIZE
176 hid0.bits.one_ppc = 1;
177 hid0.bits.isync_sc = 1;
178 hid0.bits.inorder = 1;
179 /* may not want these */
180 hid0.bits.do_single = 1;
181 hid0.bits.ser-gp = 1;
182 #endif
184 mthid0(hid0.word);
186 hid1.bits.bht_pm = 7; /* branch history table prediction mode */
187 hid1.bits.en_ls = 1; /* enable link stack */
189 hid1.bits.en_cc = 1; /* enable count cache */
190 hid1.bits.en_ic = 1; /* enable inst cache */
192 hid1.bits.pf_mode = 2; /* prefetch mode */
194 hid1.bits.en_if_cach = 1; /* i-fetch cacheability control */
195 hid1.bits.en_ic_rec = 1; /* i-cache parity error recovery */
196 hid1.bits.en_id_rec = 1; /* i-dir parity error recovery */
197 hid1.bits.en_er_rec = 1; /* i-ERAT parity error recovery */
199 hid1.bits.en_sp_itw = 1; /* En speculative tablewalks */
200 mthid1(hid1.word);
202 /* no changes to hid4 but we want to make sure that secondaries
203 * are sane */
204 hid4.bits.lg_pg_dis = 0; /* make sure we enable large pages */
205 mthid4(hid4.word);
207 hid5.bits.DC_mck = 1; /* Machine check enabled for dcache errors */
208 hid5.bits.DCBZ_size = 0; /* make dcbz size 32 bytes */
209 hid5.bits.DCBZ32_ill = 0; /* make dzbz 32byte illeagal */
210 mthid5(hid5.word);
212 #ifdef DEBUG
213 printk("hid0 0x%016lx\n"
214 "hid1 0x%016lx\n"
215 "hid4 0x%016lx\n"
216 "hid5 0x%016lx\n",
217 mfhid0(), mfhid1(), mfhid4(), mfhid5());
218 #endif
220 /* Make sure firmware has not left this dirty */
221 mthior(cpu0_hior);
223 /* some machine check goodness */
224 /* save this for checkstop processing */
225 if (cpuid == 0)
226 *mck_good_hid4 = hid4.word;
228 if (mfpir() > NR_CPUS)
229 panic("we do not expect a processor to have a PIR (%u) "
230 "to be larger that NR_CPUS(%u)\n",
231 mfpir(), NR_CPUS);
233 cpu_scom_init();
235 /* initialize the SLB */
236 #ifdef DEBUG
237 dump_segments(1);
238 #endif
239 flush_segments();
240 local_flush_tlb();
241 }
243 void cpu_init_vcpu(struct vcpu *v)
244 {
245 struct domain *d = v->domain;
246 union hid4 hid4;
247 struct rma_settings *rma_settings;
249 hid4.word = mfhid4();
251 hid4.bits.lpes_0 = 0; /* external exceptions set MSR_HV=1 */
252 hid4.bits.lpes_1 = 1; /* RMA applies */
254 hid4.bits.rmor_0_15 = page_to_maddr(d->arch.rma_page) >> 26;
256 hid4.bits.lpid_0_1 = d->domain_id & 3;
257 hid4.bits.lpid_2_5 = (d->domain_id >> 2) & 0xf;
259 rma_settings = cpu_find_rma(d->arch.rma_order + PAGE_SHIFT);
260 ASSERT(rma_settings != NULL);
261 hid4.bits.rmlr_0 = rma_settings->rmlr_0;
262 hid4.bits.rmlr_1_2 = rma_settings->rmlr_1_2;
264 v->arch.cpu.hid4.word = hid4.word;
265 }
/* Intentionally empty: there is no per-vcpu SPR state to save here.
 * HID4 is initialized with a per-domain value at domain creation time
 * (cpu_init_vcpu) and does not change after that. */
void save_cpu_sprs(struct vcpu *v)
{
}
273 void load_cpu_sprs(struct vcpu *v)
274 {
275 mthid4(v->arch.cpu.hid4.word);
276 }