ia64/xen-unstable

annotate xen/arch/x86/hvm/hvm.c @ 14181:d39dcdb9cca3

hvm: Only do hvm_disable() on HVM-enabled systems.

Original patch by Jan Beulich.
Signed-off-by: Keir Fraser <keir@xensource.com>

author:   kfraser@localhost.localdomain
date:     Wed Feb 28 14:44:52 2007 +0000
parents:  720afbf74001
children: 09a9b6d6c356
/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/paging.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled __read_mostly;

unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs __read_mostly;

/* I/O permission bitmap is globally shared by all HVM guests. */
char __attribute__ ((__section__ (".bss.page_aligned")))
    hvm_io_bitmap[3*PAGE_SIZE];

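/*
 * Editor's note on the sizing (inferred from the VT-x/SVM specs, not
 * stated in this file): AMD SVM's I/O permission map is defined to be
 * 12kB, while VMX uses two 4kB bitmaps (A: ports 0x0000-0x7fff,
 * B: 0x8000-0xffff). Allocating 3*PAGE_SIZE lets either implementation
 * point its hardware structure directly at hvm_io_bitmap.
 */
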
void hvm_enable(struct hvm_function_table *fns)
{
    if ( hvm_enabled )
        return;

    /*
     * Allow direct access to the PC debug port (it is often used for I/O
     * delays, but the vmexits simply slow things down).
     */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    clear_bit(0x80, hvm_io_bitmap);

    hvm_funcs = *fns;
    hvm_enabled = 1;
}

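/*
 * A minimal sketch of how a vendor backend registers itself. The real
 * code lives in the VMX/SVM startup paths (start_vmx()/start_svm());
 * the fields shown are the ones this file actually calls through, not a
 * complete list:
 *
 *     static struct hvm_function_table my_funcs = {
 *         .disable          = my_disable,
 *         .vcpu_initialise  = my_vcpu_initialise,
 *         .vcpu_destroy     = my_vcpu_destroy,
 *         .set_tsc_offset   = my_set_tsc_offset,
 *         ...
 *     };
 *
 *     if ( hardware_support_detected )
 *         hvm_enable(&my_funcs);
 */
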
void hvm_disable(void)
{
    if ( hvm_enabled )
        hvm_funcs.disable();
}

void hvm_stts(struct vcpu *v)
{
    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        hvm_funcs.stts(v);
}

void hvm_set_guest_time(struct vcpu *v, u64 gtime)
{
    u64 host_tsc;

    rdtscll(host_tsc);

    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}

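/*
 * The two functions above maintain the invariant
 *     guest_tsc = host_tsc + cache_tsc_offset,
 * so hvm_set_guest_time() stores offset = gtime - host_tsc and
 * hvm_get_guest_time() reads it back. For example, setting a guest time
 * of 0 while the host TSC reads 5000 stores an offset of -5000; a later
 * read at host TSC 8000 returns 8000 + (-5000) = 3000 guest ticks.
 */
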
void hvm_migrate_timers(struct vcpu *v)
{
    pit_migrate_timers(v);
    rtc_migrate_timers(v);
    hpet_migrate_timers(v);
    if ( vcpu_vlapic(v)->pt.enabled )
        migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
}

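/*
 * The ioreq shared with the device model walks a simple state machine
 * (see hvm_send_assist_req() below and the loop in hvm_do_resume()):
 *
 *     STATE_IOREQ_NONE -> STATE_IOREQ_READY      (Xen submits a request)
 *                      -> STATE_IOREQ_INPROCESS  (device model picks it up)
 *                      -> STATE_IORESP_READY     (response available)
 *                      -> STATE_IOREQ_NONE       (hvm_io_assist() consumes it)
 */
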
void hvm_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    hvm_stts(v);

    pt_thaw_time(v);

    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    while ( p->state != STATE_IOREQ_NONE )
    {
        switch ( p->state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            hvm_io_assist(v);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}

int hvm_domain_initialise(struct domain *d)
{
    int rc;

    if ( !hvm_enabled )
    {
        gdprintk(XENLOG_WARNING, "Attempt to create an HVM guest "
                 "on a non-VT/AMD-V platform.\n");
        return -EINVAL;
    }

    spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    spin_lock_init(&d->arch.hvm_domain.irq_lock);

    rc = paging_enable(d, PG_SH_enable|PG_refcounts|PG_translate|PG_external);
    if ( rc != 0 )
        return rc;

    vpic_init(d);
    vioapic_init(d);

    return 0;
}

void hvm_domain_destroy(struct domain *d)
{
    pit_deinit(d);
    rtc_deinit(d);
    hpet_deinit(d);

    if ( d->arch.hvm_domain.shared_page_va )
        unmap_domain_page_global(
            (void *)d->arch.hvm_domain.shared_page_va);

    if ( d->arch.hvm_domain.buffered_io_va )
        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
}

static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    for_each_vcpu(d, v)
    {
        /* We don't need to save state for a vcpu that is down; the restore
         * code will leave it down if there is nothing saved. */
        if ( test_bit(_VCPUF_down, &v->vcpu_flags) )
            continue;

        hvm_funcs.save_cpu_ctxt(v, &ctxt);
        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid;
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    /* Which vcpu is this? (d->vcpu[] has MAX_VIRT_CPUS slots, so
     * vcpuid == MAX_VIRT_CPUS is already out of range.) */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
        return -EINVAL;

    if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
        return -EINVAL;

    /* Auxiliary processors should be woken immediately. */
    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                          1, HVMSR_PER_VCPU);

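/*
 * HVM_REGISTER_SAVE_RESTORE hooks the pair above into the generic
 * save/restore machinery, keyed by the HVM_SAVE typecode (here CPU).
 * On this editor's reading of the macro, the trailing arguments give
 * the instance count and mark the record as per-vcpu rather than
 * per-domain (HVMSR_PER_VCPU).
 */
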
int hvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    if ( (rc = vlapic_init(v)) != 0 )
        return rc;

    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
    {
        vlapic_destroy(v);
        return rc;
    }

    /* Create ioreq event channel. */
    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
        get_vio(v->domain, v->vcpu_id)->vp_eport =
            v->arch.hvm_vcpu.xen_port;

    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

    if ( v->vcpu_id != 0 )
        return 0;

    pit_init(v, cpu_khz);
    rtc_init(v, RTC_PORT(0));
    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
    hpet_init(v);

    /* Init guest TSC to start from zero. */
    hvm_set_guest_time(v, 0);

    return 0;
}

void hvm_vcpu_destroy(struct vcpu *v)
{
    vlapic_destroy(v);
    hvm_funcs.vcpu_destroy(v);

    /* Event channel is already freed by evtchn_destroy(). */
    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
}

void hvm_vcpu_reset(struct vcpu *v)
{
    vcpu_pause(v);

    vlapic_reset(vcpu_vlapic(v));

    hvm_funcs.vcpu_initialise(v);

    set_bit(_VCPUF_down, &v->vcpu_flags);
    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
    clear_bit(_VCPUF_blocked, &v->vcpu_flags);

    vcpu_unpause(v);
}

static void hvm_vcpu_down(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int online_count = 0;

    gdprintk(XENLOG_INFO, "DOM%d/VCPU%d: going offline.\n",
             d->domain_id, v->vcpu_id);

    /* Doesn't halt us immediately, but we'll never return to guest context. */
    set_bit(_VCPUF_down, &v->vcpu_flags);
    vcpu_sleep_nosync(v);

    /* Any other VCPUs online? ... */
    LOCK_BIGLOCK(d);
    for_each_vcpu ( d, v )
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
            online_count++;
    UNLOCK_BIGLOCK(d);

    /* ... Shut down the domain if not. */
    if ( online_count == 0 )
    {
        gdprintk(XENLOG_INFO, "DOM%d: all CPUs offline -- powering off.\n",
                 d->domain_id);
        domain_shutdown(d, SHUTDOWN_poweroff);
    }
}

void hvm_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    if ( unlikely(p->state != STATE_IOREQ_NONE) )
    {
        /* This indicates a bug in the device model. Crash the domain. */
        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
        domain_crash_synchronous();
    }

    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);

    /*
     * The following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}

void hvm_hlt(unsigned long rflags)
{
    /*
     * If we halt with interrupts disabled, that's a pretty sure sign that we
     * want to shut down. In a real processor, NMIs are the only way to break
     * out of this.
     */
    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
        return hvm_vcpu_down();

    do_sched_op_compat(SCHEDOP_block, 0);
}

void hvm_triple_fault(void)
{
    struct vcpu *v = current;
    gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
             "invoking HVM system reset.\n", v->vcpu_id);
    domain_shutdown(v->domain, SHUTDOWN_reboot);
}

/*
 * __hvm_copy():
 *  @buf  = hypervisor buffer
 *  @addr = guest address to copy to/from
 *  @size = number of bytes to copy
 *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
 *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
 * Returns number of bytes failed to copy (0 == complete success).
 */
static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
{
    unsigned long mfn;
    char *p;
    int count, todo;

    todo = size;
    while ( todo > 0 )
    {
        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

        if ( virt )
            mfn = get_mfn_from_gpfn(paging_gva_to_gfn(current, addr));
        else
            mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);

        if ( mfn == INVALID_MFN )
            return todo;

        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

        if ( dir )
            memcpy(p, buf, count); /* dir == TRUE:  *to* guest */
        else
            memcpy(buf, p, count); /* dir == FALSE: *from* guest */

        unmap_domain_page(p);

        mark_dirty(current->domain, mfn);

        addr += count;
        buf  += count;
        todo -= count;
    }

    return 0;
}

int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
{
    return __hvm_copy(buf, paddr, size, 1, 0);
}

int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
{
    return __hvm_copy(buf, paddr, size, 0, 0);
}

int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
{
    return __hvm_copy(buf, vaddr, size, 1, 1);
}

int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
{
    return __hvm_copy(buf, vaddr, size, 0, 1);
}

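/*
 * Typical usage of the copy helpers, as a sketch (the variable names and
 * virtual address are hypothetical; the return-value convention is the
 * one documented above: non-zero means that many bytes were NOT copied):
 *
 *     struct hvm_hw_cpu ctxt;
 *     if ( hvm_copy_from_guest_virt(&ctxt, vaddr, sizeof(ctxt)) != 0 )
 *         return -EFAULT;  /+ partial copy: a guest mapping was absent +/
 */
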
/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
void hvm_print_line(struct vcpu *v, const char c)
{
    struct hvm_domain *hd = &v->domain->arch.hvm_domain;

    spin_lock(&hd->pbuf_lock);
    hd->pbuf[hd->pbuf_idx++] = c;
    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
    {
        if ( c != '\n' )
            hd->pbuf[hd->pbuf_idx++] = '\n';
        hd->pbuf[hd->pbuf_idx] = '\0';
        printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
        hd->pbuf_idx = 0;
    }
    spin_unlock(&hd->pbuf_lock);
}

void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx)
{
    if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
    {
        cpuid(input, eax, ebx, ecx, edx);

        if ( input == 0x00000001 )
        {
            struct vcpu *v = current;

            clear_bit(X86_FEATURE_MWAIT & 31, ecx);

            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
                clear_bit(X86_FEATURE_APIC & 31, edx);

#if CONFIG_PAGING_LEVELS >= 3
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_PAE & 31, edx);
            clear_bit(X86_FEATURE_PSE36 & 31, edx);
        }
        else if ( input == 0x80000001 )
        {
#if CONFIG_PAGING_LEVELS >= 3
            struct vcpu *v = current;
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_NX & 31, edx);
#ifdef __i386__
            /* Mask feature for Intel ia32e or AMD long mode. */
            clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);

            clear_bit(X86_FEATURE_LM & 31, edx);
            clear_bit(X86_FEATURE_SYSCALL & 31, edx);
#endif
        }
    }
}

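/*
 * Note on the "& 31" above: the X86_FEATURE_* constants encode
 * (register word * 32 + bit), so masking with 31 recovers the bit
 * position within the chosen 32-bit register. For example,
 * X86_FEATURE_MWAIT names bit 3 of CPUID.1:ECX, so
 * "X86_FEATURE_MWAIT & 31" yields 3.
 */
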
typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x)                                        \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x)                               \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32
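
/*
 * For example, HYPERCALL(memory_op) expands to the designated
 * initializer
 *     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *) do_memory_op
 * so the tables below are indexed directly by hypercall number.
 */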

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %d.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}

#else /* defined(__x86_64__) */

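/*
 * 32-bit HVM guests on a 64-bit hypervisor need a shim for XENMEM
 * operations whose structures contain longs: the guest's
 * xen_add_to_physmap lays out idx and gpfn as 32-bit quantities, so the
 * anonymous struct below mirrors that layout and the fields are widened
 * one by one into a native struct before calling do_memory_op().
 */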
static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx   = u.idx;
        h.gpfn  = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}

static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %ld.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->arch.paging.mode->guest_levels == 4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}

#endif /* defined(__x86_64__) */

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
{
    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
    hvm_funcs.update_guest_cr3(v);
}

/* Initialise a hypercall transfer page for an HVM domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}

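/*
 * For orientation only (the details live in the guest kernel and in the
 * vendor init_hypercall_page implementations, not in this file): an HVM
 * guest typically discovers Xen via the 0x40000000 CPUID leaves, then
 * writes a guest frame number to an MSR advertised there; handling that
 * write is what ultimately populates the page via this function.
 */
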
/*
 * Only called in HVM domain BSP context.
 * When booting, vcpuid is always equal to apic_id.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    BUG_ON(!is_hvm_domain(d));

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        gdprintk(XENLOG_ERR,
                 "Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    /* Sync AP's TSC with BSP's. */
    v->arch.hvm_vcpu.cache_tsc_offset =
        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
    {
        gdprintk(XENLOG_ERR,
                 "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
        goto out;
    }

    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);
    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);

 out:
    xfree(ctxt);
    return rc;
}

static int hvmop_set_pci_intx_level(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
    struct xen_hvm_set_pci_intx_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_pci_intx_deassert(d, op.device, op.intx);
        break;
    case 1:
        hvm_pci_intx_assert(d, op.device, op.intx);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( op.isa_irq > 15 )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_isa_irq_deassert(d, op.isa_irq);
        break;
    case 1:
        hvm_isa_irq_assert(d, op.isa_irq);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_pci_link_route(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop)
{
    struct xen_hvm_set_pci_link_route op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.link > 3) || (op.isa_irq > 15) )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    hvm_set_pci_link_route(d, op.link, op.isa_irq);

 out:
    put_domain(d);
    return rc;
}

long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;
        struct vcpu *v;
        unsigned long mfn;
        void *p;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = get_domain_by_id(a.domid);
            if ( d == NULL )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        rc = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto param_fail;

        if ( op == HVMOP_set_param )
        {
            switch ( a.index )
            {
            case HVM_PARAM_IOREQ_PFN:
                if ( d->arch.hvm_domain.shared_page_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
                /* Initialise evtchn port info if VCPUs already created. */
                for_each_vcpu ( d, v )
                    get_vio(d, v->vcpu_id)->vp_eport =
                        v->arch.hvm_vcpu.xen_port;
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                if ( d->arch.hvm_domain.buffered_io_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
                break;
            case HVM_PARAM_CALLBACK_IRQ:
                hvm_set_callback_via(d, a.value);
                break;
            }
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else
        {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

    param_fail:
        put_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = hvmop_set_pci_link_route(
            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
        break;

    default:
    {
        gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
        break;
    }
    }

    return rc;
}
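
/*
 * For orientation, a guest-side caller of the interface above might look
 * like this sketch (HYPERVISOR_hvm_op is the guest's hypercall stub; the
 * values shown are illustrative, not taken from this file):
 *
 *     struct xen_hvm_param a = {
 *         .domid = DOMID_SELF,
 *         .index = HVM_PARAM_CALLBACK_IRQ,
 *         .value = callback_via,
 *     };
 *     rc = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
 */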

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */