ia64/xen-unstable

annotate xen/arch/ia64/vmx/vmx_init.c @ 8492:9fc306e40a7c

Move public hvm interfaces into xen/include/public/hvm.
Add new header hvm_info_table.h for defining location and
contents of acpi-style hvm_info_table. Remove duplicate
definition in vmxassist/acpi_madt.c.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Jan 03 14:58:34 2006 +0100 (2006-01-03)
parents 2d5c57be196d
children f1b361b05bf3
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_init.c: initialization work for vt specific domain
 * Copyright (c) 2005, Intel Corporation.
 *  Kun Tian (Kevin Tian) <kevin.tian@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Fred Yang <fred.yang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Disable double mapping
 *
 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Simplified design in first step:
 * - One virtual environment
 * - Domain is bound to one LP
 * Later to support guest SMP:
 * - Need interface to handle VP scheduled to different LP
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/pal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/vmx_vcpu.h>
#include <xen/lib.h>
#include <asm/vmmu.h>
#include <public/arch-ia64.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx_phy_mode.h>
#include <asm/vmx.h>
#include <xen/mm.h>
#include <asm/vmx_vioapic.h>

/* Global flag to identify whether Intel vmx feature is on */
u32 vmx_enabled = 0;
unsigned int opt_vmx_debug_level = 0;
static u32 vm_order;
static u64 buffer_size;
static u64 vp_env_info;
static u64 vm_buffer = 0;   /* Buffer required to bring up VMX feature */
u64 __vsa_base = 0;         /* Run-time service base of VMX */

/* Check whether the vt feature is enabled or not. */
void
identify_vmx_feature(void)
{
    pal_status_t ret;
    u64 avail = 1, status = 1, control = 1;

    vmx_enabled = 0;
    /* Check VT-i feature */
    ret = ia64_pal_proc_get_features(&avail, &status, &control);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get proc features failed.\n");
        goto no_vti;
    }

    /* FIXME: do we need to check the status field, to see whether
     * PSR.vm is actually enabled? If yes, another call to
     * ia64_pal_proc_set_features may be required then.
     */
    printk("avail:0x%lx, status:0x%lx, control:0x%lx, vm?0x%lx\n",
           avail, status, control, avail & PAL_PROC_VM_BIT);
    if (!(avail & PAL_PROC_VM_BIT)) {
        printk("No VT feature supported.\n");
        goto no_vti;
    }

    ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get vp environment info failed.\n");
        goto no_vti;
    }

    /* Does Xen have the ability to decode itself? */
    if (!(vp_env_info & VP_OPCODE))
        printk("WARNING: no opcode provided from hardware(%lx)!!!\n",
               vp_env_info);
    vm_order = get_order(buffer_size);
    printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);

    vmx_enabled = 1;
no_vti:
    return;
}

/*
 * Init virtual environment on current LP.
 * vsa_base is the indicator of whether this is the first LP to be
 * initialized for the current domain.
 */
void
vmx_init_env(void)
{
    u64 status, tmp_base;

    if (!vm_buffer) {
        vm_buffer = (u64)alloc_xenheap_pages(vm_order);
        ASSERT(vm_buffer);
        printk("vm_buffer: 0x%lx\n", vm_buffer);
    }

    status = ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
                                  __pa(vm_buffer),
                                  vm_buffer,
                                  &tmp_base);

    if (status != PAL_STATUS_SUCCESS) {
        printk("ia64_pal_vp_init_env failed.\n");
        return;
    }

    if (!__vsa_base)
        __vsa_base = tmp_base;
    else
        /* PAL should report the same VSA base on every LP */
        ASSERT(tmp_base == __vsa_base);
}

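/*
 * Layout of CPUID register 3 (version information) as defined by the
 * IA-64 architecture: processor number, revision, model, family and
 * architecture revision, each one byte wide.
 */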
typedef union {
    u64 value;
    struct {
        u64 number   : 8;
        u64 revision : 8;
        u64 model    : 8;
        u64 family   : 8;
        u64 archrev  : 8;
        u64 rv       : 24;
    };
} cpuid3_t;

/* Allocate vpd from the xenheap */
static vpd_t *alloc_vpd(void)
{
    int i;
    cpuid3_t cpuid3;
    vpd_t *vpd;

    vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
    if (!vpd) {
        printk("VPD allocation failed.\n");
        return NULL;
    }

    printk("vpd base: 0x%lx, vpd size: %ld\n", (u64)vpd, sizeof(vpd_t));
    memset(vpd, 0, VPD_SIZE);
    /* CPUID init */
    for (i = 0; i < 5; i++)
        vpd->vcpuid[i] = ia64_get_cpuid(i);

    /* Limit the CPUID number to 5 */
    cpuid3.value = vpd->vcpuid[3];
    cpuid3.number = 4;  /* 5 - 1 */
    vpd->vcpuid[3] = cpuid3.value;

    vpd->vdc.d_vmsw = 1;
    return vpd;
}

/*
 * Create a VP on an initialized VMX environment.
 */
static void
vmx_create_vp(struct vcpu *v)
{
    u64 ret;
    vpd_t *vpd = v->arch.privregs;
    u64 ivt_base;
    extern char vmx_ia64_ivt;
    /* ia64_ivt is a function pointer, so this translation is needed */
    ivt_base = (u64)&vmx_ia64_ivt;
    printk("ivt_base: 0x%lx\n", ivt_base);
    ret = ia64_pal_vp_create(vpd, ivt_base, 0);
    if (ret != PAL_STATUS_SUCCESS)
        panic("ia64_pal_vp_create failed.\n");
}

/* Other non-context-related tasks can be done in context switch */
void
vmx_save_state(struct vcpu *v)
{
    u64 status;

    /* FIXME: about setting of pal_proc_vector... time consuming */
    status = ia64_pal_vp_save(v->arch.privregs, 0);
    if (status != PAL_STATUS_SUCCESS)
        panic("Save vp status failed\n");

    /* Need to save the KRs on domain switch, though the HV itself
     * doesn't use them.
     */
    v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
    v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
    v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
    v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
    v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
    v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
    v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
    v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
}

/* Even when the guest is in physical mode, we still need such a double mapping */
void
vmx_load_state(struct vcpu *v)
{
    u64 status;

    status = ia64_pal_vp_restore(v->arch.privregs, 0);
    if (status != PAL_STATUS_SUCCESS)
        panic("Restore vp status failed\n");

    ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
    ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
    ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
    ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
    ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
    ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
    ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
    ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
    /* Guest vTLB is not required to be switched explicitly, since
     * it is anchored in the vcpu */
}

/*
 * Initialize the VMX environment for the guest. Only the 1st vp/vcpu
 * is registered here.
 */
void
vmx_final_setup_guest(struct vcpu *v)
{
    vpd_t *vpd;

    /* Allocate resources for vcpu 0 */
    //memset(&v->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));

    vpd = alloc_vpd();
    ASSERT(vpd);

    v->arch.privregs = vpd;
    vpd->virt_env_vaddr = vm_buffer;

    /* Per-domain vTLB and vhpt implementation. For now a vmx domain
     * sticks to this solution. Maybe it can be deferred until we know
     * the created one is a vmx domain. */
    v->arch.vtlb = init_domain_tlb(v);

    /* v->arch.schedule_tail = arch_vmx_do_launch; */
    vmx_create_vp(v);

    /* Mark this vcpu as vmx */
    set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);

    /* Physical mode emulation initialization, including
     * emulation ID allocation and related memory request
     */
    physical_mode_init(v);

    vlsapic_reset(v);
    vtm_init(v);

    /* One more step to enable interrupt assist */
    set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
}

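/*
 * Guest physical address ranges backed by emulation rather than real
 * memory. Each range is entered into the guest physmap with a special
 * GPFN type so that faulting accesses can be routed to the matching
 * device model.
 */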
typedef struct io_range {
    unsigned long start;
    unsigned long size;
    unsigned long type;
} io_range_t;

io_range_t io_ranges[] = {
    {VGA_IO_START,    VGA_IO_SIZE,    GPFN_FRAME_BUFFER},
    {MMIO_START,      MMIO_SIZE,      GPFN_LOW_MMIO},
    {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
    {IO_SAPIC_START,  IO_SAPIC_SIZE,  GPFN_IOSAPIC},
    {PIB_START,       PIB_SIZE,       GPFN_PIB},
};

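/*
 * Pages reserved out of a vmx domain's allocation: one shared I/O page
 * plus one xenstore page (the "2" below), and the guest firmware image
 * of GFW_SIZE bytes. Everything else is configured guest memory.
 */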
#define VMX_SYS_PAGES       (2 + (GFW_SIZE >> PAGE_SHIFT))
#define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)

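/*
 * Build the vmx domain's physmap: configured RAM is mapped contiguously
 * below MMIO_START, with any remainder placed above 4G so the 3G-4G
 * hole stays free for MMIO; the guest firmware is mapped at GFW_START,
 * followed by the shared I/O and xenstore pages.
 */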
int vmx_alloc_contig_pages(struct domain *d)
{
    unsigned int order;
    unsigned long i, j, start, end, pgnr, conf_nr;
    struct pfn_info *page;
    struct vcpu *v = d->vcpu[0];

    ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));

    /* Mark I/O ranges */
    for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
        for (j = io_ranges[i].start;
             j < io_ranges[i].start + io_ranges[i].size;
             j += PAGE_SIZE)
            map_domain_page(d, j, io_ranges[i].type);
    }

    conf_nr = VMX_CONFIG_PAGES(d);
    order = get_order_from_pages(conf_nr);
    if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
        printk("Could not allocate order=%d pages for vmx contig alloc\n",
               order);
        return -1;
    }

    /* Map normal memory below 3G */
    pgnr = page_to_pfn(page);
    end = conf_nr << PAGE_SHIFT;
    for (i = 0;
         i < (end < MMIO_START ? end : MMIO_START);
         i += PAGE_SIZE, pgnr++)
        map_domain_page(d, i, pgnr << PAGE_SHIFT);

    /* Map normal memory beyond 4G */
    if (unlikely(end > MMIO_START)) {
        start = 4 * MEM_G;
        end = start + (end - 3 * MEM_G);
        for (i = start; i < end; i += PAGE_SIZE, pgnr++)
            map_domain_page(d, i, pgnr << PAGE_SHIFT);
    }

    d->arch.max_pfn = end >> PAGE_SHIFT;

    order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
    if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
        printk("Could not allocate order=%d pages for vmx contig alloc\n",
               order);
        return -1;
    }

    /* Map guest firmware */
    pgnr = page_to_pfn(page);
    for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
        map_domain_page(d, i, pgnr << PAGE_SHIFT);

    if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
        printk("Could not allocate order=1 pages for vmx contig alloc\n");
        return -1;
    }

    /* Map for shared I/O page and xenstore */
    pgnr = page_to_pfn(page);
    map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
    pgnr++;
    map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);

    set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
    return 0;
}

void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
{
    shared_iopage_t *sp;

    ASSERT(d != dom0);  /* only for non-privileged vti domain */
    d->arch.vmx_platform.shared_page_va =
        __va(__gpa_to_mpa(d, IO_PAGE_START));
    sp = get_sp(d);
    //memset((char *)sp, 0, PAGE_SIZE);
    /* TEMP */
    d->arch.vmx_platform.pib_base = 0xfee00000UL;

    /* Only open one port for I/O and interrupt emulation */
    memset(&d->shared_info->evtchn_mask[0], 0xff,
           sizeof(d->shared_info->evtchn_mask));
    clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);

    /* Initialize the virtual interrupt lines */
    vmx_virq_line_init(d);

    /* Initialize iosapic model within hypervisor */
    vmx_vioapic_init(d);
}

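/*
 * Call-order sketch (inferred from the functions above, not a verbatim
 * copy of the callers elsewhere in the tree):
 *
 *   identify_vmx_feature();       // boot: probe PAL for VT-i support
 *   vmx_init_env();               // per-LP: set up the PAL VP environment
 *   vmx_final_setup_guest(v);     // vcpu 0 of a new vmx domain
 *   vmx_alloc_contig_pages(d);    // build the guest physmap
 *   vmx_setup_platform(d, c);     // wire up I/O page, virq lines, vIOSAPIC
 */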