ia64/xen-unstable

annotate xen/arch/ia64/vmx/vmx_init.c @ 6458:3ca4ca7a9cc2

Final changes for linux 2.6.13 rebasing and some directory reorgs
author djm@kirby.fc.hp.com
date Thu Sep 01 12:46:28 2005 -0600 (2005-09-01)
parents
children 2c823d27cf33
rev   line source
djm@6458 1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
djm@6458 2 /*
djm@6458 3 * vmx_init.c: initialization work for vt specific domain
djm@6458 4 * Copyright (c) 2005, Intel Corporation.
djm@6458 5 * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
djm@6458 6 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
djm@6458 7 * Fred Yang <fred.yang@intel.com>
djm@6458 8 *
djm@6458 9 * This program is free software; you can redistribute it and/or modify it
djm@6458 10 * under the terms and conditions of the GNU General Public License,
djm@6458 11 * version 2, as published by the Free Software Foundation.
djm@6458 12 *
djm@6458 13 * This program is distributed in the hope it will be useful, but WITHOUT
djm@6458 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
djm@6458 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
djm@6458 16 * more details.
djm@6458 17 *
djm@6458 18 * You should have received a copy of the GNU General Public License along with
djm@6458 19 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
djm@6458 20 * Place - Suite 330, Boston, MA 02111-1307 USA.
djm@6458 21 *
djm@6458 22 */
djm@6458 23
djm@6458 24 /*
djm@6458 25 * 05/08/16 Kun tian (Kevin Tian) <kevin.tian@intel.com>:
djm@6458 26 * Disable double mapping
djm@6458 27 *
djm@6458 28 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
djm@6458 29 * Simplified design in first step:
djm@6458 30 * - One virtual environment
djm@6458 31 * - Domain is bound to one LP
djm@6458 32 * Later to support guest SMP:
djm@6458 33 * - Need interface to handle VP scheduled to different LP
djm@6458 34 */
djm@6458 35 #include <xen/config.h>
djm@6458 36 #include <xen/types.h>
djm@6458 37 #include <xen/sched.h>
djm@6458 38 #include <asm/pal.h>
djm@6458 39 #include <asm/page.h>
djm@6458 40 #include <asm/processor.h>
djm@6458 41 #include <asm/vmx_vcpu.h>
djm@6458 42 #include <xen/lib.h>
djm@6458 43 #include <asm/vmmu.h>
djm@6458 44 #include <public/arch-ia64.h>
djm@6458 45 #include <public/io/ioreq.h>
djm@6458 46 #include <asm/vmx_phy_mode.h>
djm@6458 47 #include <asm/processor.h>
djm@6458 48 #include <asm/vmx.h>
djm@6458 49 #include <xen/mm.h>
djm@6458 50
djm@6458 51 /* Global flag to identify whether Intel vmx feature is on */
djm@6458 52 u32 vmx_enabled = 0;
djm@6458 53 static u32 vm_order;
djm@6458 54 static u64 buffer_size;
djm@6458 55 static u64 vp_env_info;
djm@6458 56 static u64 vm_buffer = 0; /* Buffer required to bring up VMX feature */
djm@6458 57 u64 __vsa_base = 0; /* Run-time service base of VMX */
djm@6458 58
djm@6458 59 /* Check whether vt feature is enabled or not. */
djm@6458 60 void
djm@6458 61 identify_vmx_feature(void)
djm@6458 62 {
djm@6458 63 pal_status_t ret;
djm@6458 64 u64 avail = 1, status = 1, control = 1;
djm@6458 65
djm@6458 66 vmx_enabled = 0;
djm@6458 67 /* Check VT-i feature */
djm@6458 68 ret = ia64_pal_proc_get_features(&avail, &status, &control);
djm@6458 69 if (ret != PAL_STATUS_SUCCESS) {
djm@6458 70 printk("Get proc features failed.\n");
djm@6458 71 goto no_vti;
djm@6458 72 }
djm@6458 73
djm@6458 74 /* FIXME: do we need to check status field, to see whether
djm@6458 75 * PSR.vm is actually enabled? If yes, aonther call to
djm@6458 76 * ia64_pal_proc_set_features may be reuqired then.
djm@6458 77 */
djm@6458 78 printk("avail:0x%lx, status:0x%lx,control:0x%lx, vm?0x%lx\n",
djm@6458 79 avail, status, control, avail & PAL_PROC_VM_BIT);
djm@6458 80 if (!(avail & PAL_PROC_VM_BIT)) {
djm@6458 81 printk("No VT feature supported.\n");
djm@6458 82 goto no_vti;
djm@6458 83 }
djm@6458 84
djm@6458 85 ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
djm@6458 86 if (ret != PAL_STATUS_SUCCESS) {
djm@6458 87 printk("Get vp environment info failed.\n");
djm@6458 88 goto no_vti;
djm@6458 89 }
djm@6458 90
djm@6458 91 /* Does xen has ability to decode itself? */
djm@6458 92 if (!(vp_env_info & VP_OPCODE))
djm@6458 93 printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
djm@6458 94 vm_order = get_order(buffer_size);
djm@6458 95 printk("vm buffer size: %d, order: %d\n", buffer_size, vm_order);
djm@6458 96
djm@6458 97 vmx_enabled = 1;
djm@6458 98 no_vti:
djm@6458 99 return;
djm@6458 100 }
djm@6458 101
djm@6458 102 /*
djm@6458 103 * Init virtual environment on current LP
djm@6458 104 * vsa_base is the indicator whether it's first LP to be initialized
djm@6458 105 * for current domain.
djm@6458 106 */
djm@6458 107 void
djm@6458 108 vmx_init_env(void)
djm@6458 109 {
djm@6458 110 u64 status, tmp_base;
djm@6458 111
djm@6458 112 if (!vm_buffer) {
djm@6458 113 vm_buffer = alloc_xenheap_pages(vm_order);
djm@6458 114 ASSERT(vm_buffer);
djm@6458 115 printk("vm_buffer: 0x%lx\n", vm_buffer);
djm@6458 116 }
djm@6458 117
djm@6458 118 status=ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
djm@6458 119 __pa(vm_buffer),
djm@6458 120 vm_buffer,
djm@6458 121 &tmp_base);
djm@6458 122
djm@6458 123 if (status != PAL_STATUS_SUCCESS) {
djm@6458 124 printk("ia64_pal_vp_init_env failed.\n");
djm@6458 125 return -1;
djm@6458 126 }
djm@6458 127
djm@6458 128 if (!__vsa_base)
djm@6458 129 __vsa_base = tmp_base;
djm@6458 130 else
djm@6458 131 ASSERT(tmp_base != __vsa_base);
djm@6458 132
djm@6458 133 #ifdef XEN_DBL_MAPPING
djm@6458 134 /* Init stub for rr7 switch */
djm@6458 135 vmx_init_double_mapping_stub();
djm@6458 136 #endif
djm@6458 137 }
djm@6458 138
djm@6458 139 void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c)
djm@6458 140 {
djm@6458 141 struct domain *d = v->domain;
djm@6458 142 shared_iopage_t *sp;
djm@6458 143
djm@6458 144 ASSERT(d != dom0); /* only for non-privileged vti domain */
djm@6458 145 d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
djm@6458 146 sp = get_sp(d);
djm@6458 147 memset((char *)sp,0,PAGE_SIZE);
djm@6458 148 /* FIXME: temp due to old CP */
djm@6458 149 sp->sp_global.eport = 2;
djm@6458 150 #ifdef V_IOSAPIC_READY
djm@6458 151 sp->vcpu_number = 1;
djm@6458 152 #endif
djm@6458 153 /* TEMP */
djm@6458 154 d->arch.vmx_platform.pib_base = 0xfee00000UL;
djm@6458 155
djm@6458 156 /* One more step to enable interrupt assist */
djm@6458 157 set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
djm@6458 158 /* Only open one port for I/O and interrupt emulation */
djm@6458 159 if (v == d->vcpu[0]) {
djm@6458 160 memset(&d->shared_info->evtchn_mask[0], 0xff,
djm@6458 161 sizeof(d->shared_info->evtchn_mask));
djm@6458 162 clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
djm@6458 163 }
djm@6458 164
djm@6458 165 /* FIXME: only support PMT table continuously by far */
djm@6458 166 d->arch.pmt = __va(c->pt_base);
djm@6458 167 d->arch.max_pfn = c->pt_max_pfn;
djm@6458 168
djm@6458 169 vmx_final_setup_domain(d);
djm@6458 170 }
djm@6458 171
djm@6458 172 typedef union {
djm@6458 173 u64 value;
djm@6458 174 struct {
djm@6458 175 u64 number : 8;
djm@6458 176 u64 revision : 8;
djm@6458 177 u64 model : 8;
djm@6458 178 u64 family : 8;
djm@6458 179 u64 archrev : 8;
djm@6458 180 u64 rv : 24;
djm@6458 181 };
djm@6458 182 } cpuid3_t;
djm@6458 183
djm@6458 184 /* Allocate vpd from xenheap */
djm@6458 185 static vpd_t *alloc_vpd(void)
djm@6458 186 {
djm@6458 187 int i;
djm@6458 188 cpuid3_t cpuid3;
djm@6458 189 vpd_t *vpd;
djm@6458 190
djm@6458 191 vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
djm@6458 192 if (!vpd) {
djm@6458 193 printk("VPD allocation failed.\n");
djm@6458 194 return NULL;
djm@6458 195 }
djm@6458 196
djm@6458 197 printk("vpd base: 0x%lx, vpd size:%d\n", vpd, sizeof(vpd_t));
djm@6458 198 memset(vpd, 0, VPD_SIZE);
djm@6458 199 /* CPUID init */
djm@6458 200 for (i = 0; i < 5; i++)
djm@6458 201 vpd->vcpuid[i] = ia64_get_cpuid(i);
djm@6458 202
djm@6458 203 /* Limit the CPUID number to 5 */
djm@6458 204 cpuid3.value = vpd->vcpuid[3];
djm@6458 205 cpuid3.number = 4; /* 5 - 1 */
djm@6458 206 vpd->vcpuid[3] = cpuid3.value;
djm@6458 207
djm@6458 208 vpd->vdc.d_vmsw = 1;
djm@6458 209 return vpd;
djm@6458 210 }
djm@6458 211
djm@6458 212
djm@6458 213 #ifdef CONFIG_VTI
djm@6458 214 /*
djm@6458 215 * Create a VP on intialized VMX environment.
djm@6458 216 */
djm@6458 217 static void
djm@6458 218 vmx_create_vp(struct vcpu *v)
djm@6458 219 {
djm@6458 220 u64 ret;
djm@6458 221 vpd_t *vpd = v->arch.arch_vmx.vpd;
djm@6458 222 u64 ivt_base;
djm@6458 223 extern char vmx_ia64_ivt;
djm@6458 224 /* ia64_ivt is function pointer, so need this tranlation */
djm@6458 225 ivt_base = (u64) &vmx_ia64_ivt;
djm@6458 226 printk("ivt_base: 0x%lx\n", ivt_base);
djm@6458 227 ret = ia64_pal_vp_create(vpd, ivt_base, 0);
djm@6458 228 if (ret != PAL_STATUS_SUCCESS)
djm@6458 229 panic("ia64_pal_vp_create failed. \n");
djm@6458 230 }
djm@6458 231
djm@6458 232 #ifdef XEN_DBL_MAPPING
djm@6458 233 void vmx_init_double_mapping_stub(void)
djm@6458 234 {
djm@6458 235 u64 base, psr;
djm@6458 236 extern void vmx_switch_rr7(void);
djm@6458 237
djm@6458 238 base = (u64) &vmx_switch_rr7;
djm@6458 239 base = *((u64*)base);
djm@6458 240
djm@6458 241 psr = ia64_clear_ic();
djm@6458 242 ia64_itr(0x1, IA64_TR_RR7_SWITCH_STUB, XEN_RR7_SWITCH_STUB,
djm@6458 243 pte_val(pfn_pte(__pa(base) >> PAGE_SHIFT, PAGE_KERNEL)),
djm@6458 244 RR7_SWITCH_SHIFT);
djm@6458 245 ia64_set_psr(psr);
djm@6458 246 ia64_srlz_i();
djm@6458 247 printk("Add TR mapping for rr7 switch stub, with physical: 0x%lx\n", (u64)(__pa(base)));
djm@6458 248 }
djm@6458 249 #endif
djm@6458 250
djm@6458 251 /* Other non-context related tasks can be done in context switch */
djm@6458 252 void
djm@6458 253 vmx_save_state(struct vcpu *v)
djm@6458 254 {
djm@6458 255 u64 status, psr;
djm@6458 256 u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
djm@6458 257
djm@6458 258 /* FIXME: about setting of pal_proc_vector... time consuming */
djm@6458 259 status = ia64_pal_vp_save(v->arch.arch_vmx.vpd, 0);
djm@6458 260 if (status != PAL_STATUS_SUCCESS)
djm@6458 261 panic("Save vp status failed\n");
djm@6458 262
djm@6458 263 #ifdef XEN_DBL_MAPPING
djm@6458 264 /* FIXME: Do we really need purge double mapping for old vcpu?
djm@6458 265 * Since rid is completely different between prev and next,
djm@6458 266 * it's not overlap and thus no MCA possible... */
djm@6458 267 dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
djm@6458 268 vmx_purge_double_mapping(dom_rr7, KERNEL_START,
djm@6458 269 (u64)v->arch.vtlb->ts->vhpt->hash);
djm@6458 270 #endif
djm@6458 271
djm@6458 272 /* Need to save KR when domain switch, though HV itself doesn;t
djm@6458 273 * use them.
djm@6458 274 */
djm@6458 275 v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
djm@6458 276 v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
djm@6458 277 v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
djm@6458 278 v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
djm@6458 279 v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
djm@6458 280 v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
djm@6458 281 v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
djm@6458 282 v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
djm@6458 283 }
djm@6458 284
djm@6458 285 /* Even guest is in physical mode, we still need such double mapping */
djm@6458 286 void
djm@6458 287 vmx_load_state(struct vcpu *v)
djm@6458 288 {
djm@6458 289 u64 status, psr;
djm@6458 290 u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
djm@6458 291 u64 pte_xen, pte_vhpt;
djm@6458 292 int i;
djm@6458 293
djm@6458 294 status = ia64_pal_vp_restore(v->arch.arch_vmx.vpd, 0);
djm@6458 295 if (status != PAL_STATUS_SUCCESS)
djm@6458 296 panic("Restore vp status failed\n");
djm@6458 297
djm@6458 298 #ifdef XEN_DBL_MAPPING
djm@6458 299 dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
djm@6458 300 pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
djm@6458 301 pte_vhpt = pte_val(pfn_pte((__pa(v->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
djm@6458 302 vmx_insert_double_mapping(dom_rr7, KERNEL_START,
djm@6458 303 (u64)v->arch.vtlb->ts->vhpt->hash,
djm@6458 304 pte_xen, pte_vhpt);
djm@6458 305 #endif
djm@6458 306
djm@6458 307 ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
djm@6458 308 ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
djm@6458 309 ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
djm@6458 310 ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
djm@6458 311 ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
djm@6458 312 ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
djm@6458 313 ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
djm@6458 314 ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
djm@6458 315 /* Guest vTLB is not required to be switched explicitly, since
djm@6458 316 * anchored in vcpu */
djm@6458 317 }
djm@6458 318
djm@6458 319 #ifdef XEN_DBL_MAPPING
djm@6458 320 /* Purge old double mapping and insert new one, due to rr7 change */
djm@6458 321 void
djm@6458 322 vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7)
djm@6458 323 {
djm@6458 324 u64 pte_xen, pte_vhpt, vhpt_base;
djm@6458 325
djm@6458 326 vhpt_base = (u64)v->arch.vtlb->ts->vhpt->hash;
djm@6458 327 vmx_purge_double_mapping(oldrr7, KERNEL_START,
djm@6458 328 vhpt_base);
djm@6458 329
djm@6458 330 pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
djm@6458 331 pte_vhpt = pte_val(pfn_pte((__pa(vhpt_base) >> PAGE_SHIFT), PAGE_KERNEL));
djm@6458 332 vmx_insert_double_mapping(newrr7, KERNEL_START,
djm@6458 333 vhpt_base,
djm@6458 334 pte_xen, pte_vhpt);
djm@6458 335 }
djm@6458 336 #endif // XEN_DBL_MAPPING
djm@6458 337 #endif // CONFIG_VTI
djm@6458 338
djm@6458 339 /*
djm@6458 340 * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
djm@6458 341 * is registered here.
djm@6458 342 */
djm@6458 343 void
djm@6458 344 vmx_final_setup_domain(struct domain *d)
djm@6458 345 {
djm@6458 346 struct vcpu *v = d->vcpu[0];
djm@6458 347 vpd_t *vpd;
djm@6458 348
djm@6458 349 /* Allocate resources for vcpu 0 */
djm@6458 350 //memset(&v->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
djm@6458 351
djm@6458 352 vpd = alloc_vpd();
djm@6458 353 ASSERT(vpd);
djm@6458 354
djm@6458 355 v->arch.arch_vmx.vpd = vpd;
djm@6458 356 vpd->virt_env_vaddr = vm_buffer;
djm@6458 357
djm@6458 358 #ifdef CONFIG_VTI
djm@6458 359 /* v->arch.schedule_tail = arch_vmx_do_launch; */
djm@6458 360 vmx_create_vp(v);
djm@6458 361
djm@6458 362 /* Set this ed to be vmx */
djm@6458 363 set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);
djm@6458 364
djm@6458 365 /* Physical mode emulation initialization, including
djm@6458 366 * emulation ID allcation and related memory request
djm@6458 367 */
djm@6458 368 physical_mode_init(v);
djm@6458 369
djm@6458 370 vlsapic_reset(v);
djm@6458 371 vtm_init(v);
djm@6458 372 #endif
djm@6458 373
djm@6458 374 /* Other vmx specific initialization work */
djm@6458 375 }