ia64/xen-unstable

view xen/arch/x86/platform_hypercall.c @ 18806:ed8524f4a044

x86: Re-initialise HPET on resume from S3

Signed-off-by: Guanqun Lu <guanqun.lu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 18 15:55:14 2008 +0000 (2008-11-18)
parents a9f299b11b7f
children 6401c9533ef5
/******************************************************************************
 * platform_hypercall.c
 *
 * Hardware platform operations. Intended for use by domain-0 kernel.
 *
 * Copyright (c) 2002-2006, K Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <xen/acpi.h>
#include <asm/current.h>
#include <public/platform.h>
#include <acpi/cpufreq/processor_perf.h>
#include <asm/edd.h>
#include <asm/mtrr.h>
#include "cpu/mtrr/mtrr.h"
#include <xsm/xsm.h>

extern uint16_t boot_edid_caps;
extern uint8_t boot_edid_info[];
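
/*
 * In the native build (COMPAT undefined) the compat copy helpers below are
 * aliased to the ordinary guest-access ones and xenpf_lock is defined here;
 * a separate compat build of this file sees only the extern declaration and
 * shares the same lock.
 */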
#ifndef COMPAT
typedef long ret_t;
DEFINE_SPINLOCK(xenpf_lock);
# undef copy_from_compat
# define copy_from_compat copy_from_guest
# undef copy_to_compat
# define copy_to_compat copy_to_guest
# undef guest_from_compat_handle
# define guest_from_compat_handle(x,y) ((x)=(y))
#else
extern spinlock_t xenpf_lock;
#endif

static DEFINE_PER_CPU(uint64_t, freq);

extern int set_px_pminfo(uint32_t cpu, struct xen_processor_performance *perf);
extern long set_cx_pminfo(uint32_t cpu, struct xen_processor_power *power);
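
/*
 * XENPF_change_freq stashes the requested frequency in the per-CPU variable
 * above and then uses continue_hypercall_on_cpu() so that this helper runs
 * on the CPU whose frequency is being changed.
 */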
static long cpu_frequency_change_helper(void *data)
{
    return cpu_frequency_change(this_cpu(freq));
}
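
/*
 * Main XENPF_* hypercall dispatcher: privileged (dom0) callers only, with
 * each sub-operation gated by an XSM check and the whole switch serialised
 * by xenpf_lock.
 */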
ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
{
    ret_t ret = 0;
    struct xen_platform_op curop, *op = &curop;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_xenpf_op, 1) )
        return -EFAULT;

    if ( op->interface_version != XENPF_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&xenpf_lock);

    switch ( op->cmd )
    {
    case XENPF_settime:
    {
        ret = xsm_xen_settime();
        if ( ret )
            break;

        do_settime(op->u.settime.secs,
                   op->u.settime.nsecs,
                   op->u.settime.system_time);
        ret = 0;
    }
    break;
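
    /*
     * The next three operations let dom0 manage the machine's variable-range
     * MTRRs (add, delete, and read back memory type ranges) on Xen's behalf.
     */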
    case XENPF_add_memtype:
    {
        ret = xsm_memtype(op->cmd);
        if ( ret )
            break;

        ret = mtrr_add_page(
            op->u.add_memtype.mfn,
            op->u.add_memtype.nr_mfns,
            op->u.add_memtype.type,
            1);
        if ( ret >= 0 )
        {
            op->u.add_memtype.handle = 0;
            op->u.add_memtype.reg = ret;
            ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
            if ( ret != 0 )
                /* Roll back the register we just added (ret now holds -EFAULT). */
                mtrr_del_page(op->u.add_memtype.reg, 0, 0);
        }
    }
    break;

    case XENPF_del_memtype:
    {
        ret = xsm_memtype(op->cmd);
        if ( ret )
            break;

        if (op->u.del_memtype.handle == 0
            /* mtrr/main.c otherwise does a lookup */
            && (int)op->u.del_memtype.reg >= 0)
        {
            ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
            if ( ret > 0 )
                ret = 0;
        }
        else
            ret = -EINVAL;
    }
    break;

    case XENPF_read_memtype:
    {
        unsigned long mfn, nr_mfns;
        mtrr_type type;

        ret = xsm_memtype(op->cmd);
        if ( ret )
            break;

        ret = -EINVAL;
        if ( op->u.read_memtype.reg < num_var_ranges )
        {
            mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type);
            op->u.read_memtype.mfn = mfn;
            op->u.read_memtype.nr_mfns = nr_mfns;
            op->u.read_memtype.type = type;
            ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
        }
    }
    break;
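
    /*
     * Dom0 hands Xen a CPU microcode image to apply; the quirk operation
     * further below lets dom0 report board-specific behaviour (IRQ
     * balancing, IO-APIC REGSEL handling) discovered by its own drivers.
     */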
    case XENPF_microcode_update:
    {
        XEN_GUEST_HANDLE(const_void) data;

        ret = xsm_microcode();
        if ( ret )
            break;

        guest_from_compat_handle(data, op->u.microcode.data);
        ret = microcode_update(data, op->u.microcode.length);
    }
    break;

    case XENPF_platform_quirk:
    {
        extern int opt_noirqbalance;
        int quirk_id = op->u.platform_quirk.quirk_id;

        ret = xsm_platform_quirk(quirk_id);
        if ( ret )
            break;

        switch ( quirk_id )
        {
        case QUIRK_NOIRQBALANCING:
            printk("Platform quirk -- Disabling IRQ balancing/affinity.\n");
            opt_noirqbalance = 1;
            setup_ioapic_dest();
            break;
        case QUIRK_IOAPIC_BAD_REGSEL:
        case QUIRK_IOAPIC_GOOD_REGSEL:
#ifndef sis_apic_bug
            sis_apic_bug = (quirk_id == QUIRK_IOAPIC_BAD_REGSEL);
            dprintk(XENLOG_INFO, "Domain 0 says that IO-APIC REGSEL is %s\n",
                    sis_apic_bug ? "bad" : "good");
#else
            BUG_ON(sis_apic_bug != (quirk_id == QUIRK_IOAPIC_BAD_REGSEL));
#endif
            break;
        default:
            ret = -EINVAL;
            break;
        }
    }
    break;
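
    /*
     * XENPF_firmware_info exposes data collected at boot time and accessed
     * through bootsym(): EDD disk parameters, MBR signatures, and VBE/EDID
     * monitor information.
     */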
    case XENPF_firmware_info:
        ret = xsm_firmware_info();
        if ( ret )
            break;

        switch ( op->u.firmware_info.type )
        {
        case XEN_FW_DISK_INFO: {
            const struct edd_info *info;
            u16 length;

            ret = -ESRCH;
            if ( op->u.firmware_info.index >= bootsym(boot_edd_info_nr) )
                break;

            info = bootsym(boot_edd_info) + op->u.firmware_info.index;

            /* Transfer the EDD info block. */
            ret = -EFAULT;
            if ( copy_from_compat(&length, op->u.firmware_info.u.
                                  disk_info.edd_params, 1) )
                break;
            if ( length > info->edd_device_params.length )
                length = info->edd_device_params.length;
            if ( copy_to_compat(op->u.firmware_info.u.disk_info.edd_params,
                                (u8 *)&info->edd_device_params,
                                length) )
                break;
            if ( copy_to_compat(op->u.firmware_info.u.disk_info.edd_params,
                                &length, 1) )
                break;

            /* Transfer miscellaneous other information values. */
#define C(x) op->u.firmware_info.u.disk_info.x = info->x
            C(device);
            C(version);
            C(interface_support);
            C(legacy_max_cylinder);
            C(legacy_max_head);
            C(legacy_sectors_per_track);
#undef C

            ret = (copy_field_to_guest(u_xenpf_op, op,
                                       u.firmware_info.u.disk_info)
                   ? -EFAULT : 0);
            break;
        }
        case XEN_FW_DISK_MBR_SIGNATURE: {
            const struct mbr_signature *sig;

            ret = -ESRCH;
            if ( op->u.firmware_info.index >= bootsym(boot_mbr_signature_nr) )
                break;

            sig = bootsym(boot_mbr_signature) + op->u.firmware_info.index;

            op->u.firmware_info.u.disk_mbr_signature.device = sig->device;
            op->u.firmware_info.u.disk_mbr_signature.mbr_signature =
                sig->signature;

            ret = (copy_field_to_guest(u_xenpf_op, op,
                                       u.firmware_info.u.disk_mbr_signature)
                   ? -EFAULT : 0);
            break;
        }
        case XEN_FW_VBEDDC_INFO:
            ret = -ESRCH;
            if ( op->u.firmware_info.index != 0 )
                break;
            if ( *(u32 *)bootsym(boot_edid_info) == 0x13131313 )
                break;

            op->u.firmware_info.u.vbeddc_info.capabilities =
                bootsym(boot_edid_caps);
            op->u.firmware_info.u.vbeddc_info.edid_transfer_time =
                bootsym(boot_edid_caps) >> 8;

            ret = 0;
            if ( copy_field_to_guest(u_xenpf_op, op, u.firmware_info.
                                     u.vbeddc_info.capabilities) ||
                 copy_field_to_guest(u_xenpf_op, op, u.firmware_info.
                                     u.vbeddc_info.edid_transfer_time) ||
                 copy_to_compat(op->u.firmware_info.u.vbeddc_info.edid,
                                bootsym(boot_edid_info), 128) )
                ret = -EFAULT;
            break;
        default:
            ret = -EINVAL;
            break;
        }
        break;
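
    /*
     * Dom0 requests an ACPI sleep state (e.g. S3) through this operation;
     * the suspend/resume work itself (where this changeset adds HPET
     * re-initialisation on resume from S3) is handled elsewhere in Xen's
     * power-management code.
     */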
    case XENPF_enter_acpi_sleep:
        ret = xsm_acpi_sleep();
        if ( ret )
            break;

        ret = acpi_enter_sleep(&op->u.enter_acpi_sleep);
        break;

    case XENPF_change_freq:
        ret = xsm_change_freq();
        if ( ret )
            break;

        ret = -ENOSYS;
        if ( cpufreq_controller != FREQCTL_dom0_kernel )
            break;
        ret = -EINVAL;
        if ( op->u.change_freq.flags || !cpu_online(op->u.change_freq.cpu) )
            break;
        per_cpu(freq, op->u.change_freq.cpu) = op->u.change_freq.freq;
        ret = continue_hypercall_on_cpu(op->u.change_freq.cpu,
                                        cpu_frequency_change_helper,
                                        NULL);
        break;
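
    /*
     * Only meaningful when dom0's kernel drives cpufreq: report each online
     * CPU's accumulated idle time so dom0's governor can make decisions.
     */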
    case XENPF_getidletime:
    {
        uint32_t cpu;
        uint64_t idletime, now = NOW();
        struct vcpu *v;
        struct xenctl_cpumap ctlmap;
        cpumask_t cpumap;
        XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
        XEN_GUEST_HANDLE(uint64) idletimes;

        ret = xsm_getidletime();
        if ( ret )
            break;

        ret = -ENOSYS;
        if ( cpufreq_controller != FREQCTL_dom0_kernel )
            break;

        ctlmap.nr_cpus = op->u.getidletime.cpumap_nr_cpus;
        guest_from_compat_handle(cpumap_bitmap,
                                 op->u.getidletime.cpumap_bitmap);
        ctlmap.bitmap.p = cpumap_bitmap.p; /* handle -> handle_64 conversion */
        xenctl_cpumap_to_cpumask(&cpumap, &ctlmap);
        guest_from_compat_handle(idletimes, op->u.getidletime.idletime);

        for_each_cpu_mask ( cpu, cpumap )
        {
            if ( (v = idle_vcpu[cpu]) != NULL )
            {
                idletime = v->runstate.time[RUNSTATE_running];
                if ( v->is_running )
                    idletime += now - v->runstate.state_entry_time;
            }
            else
            {
                idletime = 0;
                cpu_clear(cpu, cpumap);
            }

            ret = -EFAULT;
            if ( copy_to_guest_offset(idletimes, cpu, &idletime, 1) )
                goto out;
        }

        op->u.getidletime.now = now;
        cpumask_to_xenctl_cpumap(&ctlmap, &cpumap);
        ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
    }
    break;
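
    /*
     * Dom0 uploads ACPI processor performance (Px) and idle (Cx) data so
     * that Xen's own power management can use it; throttling (Tx) data is
     * not accepted here.
     */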
    case XENPF_set_processor_pminfo:
        switch ( op->u.set_pminfo.type )
        {
        case XEN_PM_PX:
            if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
            {
                ret = -ENOSYS;
                break;
            }
            ret = set_px_pminfo(op->u.set_pminfo.id, &op->u.set_pminfo.perf);
            break;

        case XEN_PM_CX:
            if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_CX) )
            {
                ret = -ENOSYS;
                break;
            }
            ret = set_cx_pminfo(op->u.set_pminfo.id, &op->u.set_pminfo.power);
            break;

        case XEN_PM_TX:
            if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_TX) )
            {
                ret = -ENOSYS;
                break;
            }
            ret = -EINVAL;
            break;

        default:
            ret = -EINVAL;
            break;
        }
        break;

    default:
        ret = -ENOSYS;
        break;
    }

 out:
    spin_unlock(&xenpf_lock);

    return ret;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */