ia64/xen-unstable

annotate xen/arch/ia64/vmx/vmx_init.c @ 8916:0f59ace5442c

[IA64] Clean up warnings related to VTi code. (C files)

This patch removes most of the warnings such as incompatible assignment,
unused variables, return value type of some functions and so on.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Fri Feb 24 13:29:18 2006 -0700 (2006-02-24)
parents 9eb9fa8a9933
children 6c43118bdba8
rev   line source
djm@6458 1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
djm@6458 2 /*
djm@6458 3 * vmx_init.c: initialization work for vt specific domain
djm@6458 4 * Copyright (c) 2005, Intel Corporation.
djm@6458 5 * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
djm@6458 6 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
djm@6458 7 * Fred Yang <fred.yang@intel.com>
djm@6458 8 *
djm@6458 9 * This program is free software; you can redistribute it and/or modify it
djm@6458 10 * under the terms and conditions of the GNU General Public License,
djm@6458 11 * version 2, as published by the Free Software Foundation.
djm@6458 12 *
djm@6458 13 * This program is distributed in the hope it will be useful, but WITHOUT
djm@6458 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
djm@6458 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
djm@6458 16 * more details.
djm@6458 17 *
djm@6458 18 * You should have received a copy of the GNU General Public License along with
djm@6458 19 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
djm@6458 20 * Place - Suite 330, Boston, MA 02111-1307 USA.
djm@6458 21 *
djm@6458 22 */
djm@6458 23
djm@6458 24 /*
djm@6458 25 * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
djm@6458 26 * Disable double mapping
djm@6458 27 *
djm@6458 28 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
djm@6458 29 * Simplified design in the first step:
djm@6458 30 * - One virtual environment
djm@6458 31 * - Domain is bound to one LP
djm@6458 32 * Later to support guest SMP:
djm@6458 33 * - Need interface to handle VP scheduled to different LP
djm@6458 34 */
djm@6458 35 #include <xen/config.h>
djm@6458 36 #include <xen/types.h>
djm@6458 37 #include <xen/sched.h>
djm@6458 38 #include <asm/pal.h>
djm@6458 39 #include <asm/page.h>
djm@6458 40 #include <asm/processor.h>
djm@6458 41 #include <asm/vmx_vcpu.h>
djm@6458 42 #include <xen/lib.h>
djm@6458 43 #include <asm/vmmu.h>
djm@6458 44 #include <public/arch-ia64.h>
kaf24@8492 45 #include <public/hvm/ioreq.h>
djm@6458 46 #include <asm/vmx_phy_mode.h>
djm@6458 48 #include <asm/vmx.h>
djm@6458 49 #include <xen/mm.h>
kaf24@8708 51 #include <asm/hvm/vioapic.h>
djm@6458 52
djm@6458 53 /* Global flag to identify whether Intel vmx feature is on */
djm@6458 54 u32 vmx_enabled = 0;
kaf24@7720 55 unsigned int opt_vmx_debug_level = 0;
djm@6458 56 static u32 vm_order;
djm@6458 57 static u64 buffer_size;
djm@6458 58 static u64 vp_env_info;
djm@6458 59 static u64 vm_buffer = 0; /* Buffer required to bring up VMX feature */
djm@6458 60 u64 __vsa_base = 0; /* Run-time service base of VMX */
djm@6458 61
djm@6458 62 /* Check whether vt feature is enabled or not. */
djm@6458 63 void
djm@6458 64 identify_vmx_feature(void)
djm@6458 65 {
djm@6458 66 pal_status_t ret;
djm@6458 67 u64 avail = 1, status = 1, control = 1;
djm@6458 68
djm@6458 69 vmx_enabled = 0;
djm@6458 70 /* Check VT-i feature */
djm@6458 71 ret = ia64_pal_proc_get_features(&avail, &status, &control);
djm@6458 72 if (ret != PAL_STATUS_SUCCESS) {
djm@6458 73 printk("Get proc features failed.\n");
djm@6458 74 goto no_vti;
djm@6458 75 }
djm@6458 76
djm@6458 77 /* FIXME: do we need to check status field, to see whether
djm@6458 78 * PSR.vm is actually enabled? If yes, another call to
djm@6458 79 * ia64_pal_proc_set_features may be required then.
djm@6458 80 */
djm@6458 81 printk("avail:0x%lx, status:0x%lx,control:0x%lx, vm?0x%lx\n",
djm@6458 82 avail, status, control, avail & PAL_PROC_VM_BIT);
djm@6458 83 if (!(avail & PAL_PROC_VM_BIT)) {
djm@6458 84 printk("No VT feature supported.\n");
djm@6458 85 goto no_vti;
djm@6458 86 }
djm@6458 87
djm@6458 88 ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
djm@6458 89 if (ret != PAL_STATUS_SUCCESS) {
djm@6458 90 printk("Get vp environment info failed.\n");
djm@6458 91 goto no_vti;
djm@6458 92 }
djm@6458 93
djm@6458 94 /* Does the hardware provide the opcode, or must Xen decode it itself? */
djm@6458 95 if (!(vp_env_info & VP_OPCODE))
djm@6458 96 printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
djm@6458 97 vm_order = get_order(buffer_size);
awilliam@8916 98 printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);
djm@6458 99
djm@6458 100 vmx_enabled = 1;
djm@6458 101 no_vti:
djm@6458 102 return;
djm@6458 103 }
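/*
 * A minimal sketch (illustrative, not part of this changeset) of the
 * expected boot-time ordering, assuming the caller tests vmx_enabled
 * before bringing up the VTi environment on each logical processor:
 *
 *     identify_vmx_feature();     // probe PAL once; sets vmx_enabled
 *     if (vmx_enabled)
 *         vmx_init_env();         // run on every LP; the first call also
 *                                 // allocates vm_buffer and __vsa_base
 */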
djm@6458 104
djm@6458 105 /*
djm@6458 106 * Init virtual environment on current LP.
djm@6458 107 * __vsa_base indicates whether this is the first LP to be
djm@6458 108 * initialized for the current domain.
djm@6458 109 */
djm@6458 110 void
djm@6458 111 vmx_init_env(void)
djm@6458 112 {
djm@6458 113 u64 status, tmp_base;
djm@6458 114
djm@6458 115 if (!vm_buffer) {
awilliam@8916 116 vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
djm@6458 117 ASSERT(vm_buffer);
djm@6458 118 printk("vm_buffer: 0x%lx\n", vm_buffer);
djm@6458 119 }
djm@6458 120
djm@6458 121 status = ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
djm@6458 122 __pa(vm_buffer),
djm@6458 123 vm_buffer,
djm@6458 124 &tmp_base);
djm@6458 125
djm@6458 126 if (status != PAL_STATUS_SUCCESS) {
djm@6458 127 printk("ia64_pal_vp_init_env failed.\n");
awilliam@8916 128 return;
djm@6458 129 }
djm@6458 130
djm@6458 131 if (!__vsa_base)
djm@6458 132 __vsa_base = tmp_base;
djm@6458 133 else
djm@6458 134 ASSERT(tmp_base == __vsa_base);
djm@6458 135
djm@6458 136 }
djm@6458 137
djm@6458 138 typedef union {
djm@6458 139 u64 value;
djm@6458 140 struct {
djm@6458 141 u64 number : 8;
djm@6458 142 u64 revision : 8;
djm@6458 143 u64 model : 8;
djm@6458 144 u64 family : 8;
djm@6458 145 u64 archrev : 8;
djm@6458 146 u64 rv : 24;
djm@6458 147 };
djm@6458 148 } cpuid3_t;
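/*
 * Bit layout of CPUID register 3 as modelled above (illustrative):
 *
 *     bits  0- 7  number    largest implemented CPUID register index
 *     bits  8-15  revision
 *     bits 16-23  model
 *     bits 24-31  family
 *     bits 32-39  archrev
 *     bits 40-63  rv        reserved
 *
 * Writing cpuid3.number = 4 therefore advertises CPUID[0]..CPUID[4]
 * (five registers) while leaving all other fields untouched.
 */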
djm@6458 149
djm@6458 150 /* Allocate vpd from xenheap */
djm@6458 151 static vpd_t *alloc_vpd(void)
djm@6458 152 {
djm@6458 153 int i;
djm@6458 154 cpuid3_t cpuid3;
djm@6458 155 vpd_t *vpd;
djm@6458 156
djm@6458 157 vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
djm@6458 158 if (!vpd) {
djm@6458 159 printk("VPD allocation failed.\n");
djm@6458 160 return NULL;
djm@6458 161 }
djm@6458 162
djm@6458 163 printk("vpd base: 0x%lx, vpd size:%ld\n", (unsigned long)vpd, sizeof(vpd_t));
djm@6458 164 memset(vpd, 0, VPD_SIZE);
djm@6458 165 /* CPUID init */
djm@6458 166 for (i = 0; i < 5; i++)
djm@6458 167 vpd->vcpuid[i] = ia64_get_cpuid(i);
djm@6458 168
djm@6458 169 /* Limit the CPUID number to 5 */
djm@6458 170 cpuid3.value = vpd->vcpuid[3];
djm@6458 171 cpuid3.number = 4; /* 5 - 1 */
djm@6458 172 vpd->vcpuid[3] = cpuid3.value;
djm@6458 173
djm@6458 174 vpd->vdc.d_vmsw = 1;
djm@6458 175 return vpd;
djm@6458 176 }
djm@6458 177
djm@6458 178
djm@6458 179 /*
djm@6458 180 * Create a VP on an initialized VMX environment.
djm@6458 181 */
djm@6458 182 static void
djm@6458 183 vmx_create_vp(struct vcpu *v)
djm@6458 184 {
djm@6458 185 u64 ret;
djm@6801 186 vpd_t *vpd = v->arch.privregs;
djm@6458 187 u64 ivt_base;
djm@6458 188 extern char vmx_ia64_ivt;
djm@6458 189 /* vmx_ia64_ivt is an entry point, so this translation is needed */
djm@6458 190 ivt_base = (u64) &vmx_ia64_ivt;
djm@6458 191 printk("ivt_base: 0x%lx\n", ivt_base);
awilliam@8916 192 ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
djm@6458 193 if (ret != PAL_STATUS_SUCCESS)
djm@6458 194 panic("ia64_pal_vp_create failed.\n");
djm@6458 195 }
djm@6458 196
djm@6458 197 /* Other non-context-related tasks can be done during the context switch */
djm@6458 198 void
djm@6458 199 vmx_save_state(struct vcpu *v)
djm@6458 200 {
awilliam@8916 201 u64 status;
djm@6458 202
djm@6458 203 /* FIXME: about setting of pal_proc_vector... time consuming */
awilliam@8916 204 status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
djm@6458 205 if (status != PAL_STATUS_SUCCESS)
djm@6458 206 panic("Save vp status failed\n");
djm@6458 207
djm@6458 208
djm@6458 209 /* Need to save KRs on domain switch, though the HV itself doesn't
djm@6458 210 * use them.
djm@6458 211 */
djm@6458 212 v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
djm@6458 213 v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
djm@6458 214 v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
djm@6458 215 v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
djm@6458 216 v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
djm@6458 217 v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
djm@6458 218 v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
djm@6458 219 v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
djm@6458 220 }
djm@6458 221
djm@6458 222 /* Even when the guest is in physical mode, we still need this double mapping */
djm@6458 223 void
djm@6458 224 vmx_load_state(struct vcpu *v)
djm@6458 225 {
awilliam@8916 226 u64 status;
djm@6458 227
djm@6801 228 status = ia64_pal_vp_restore(v->arch.privregs, 0);
djm@6458 229 if (status != PAL_STATUS_SUCCESS)
djm@6458 230 panic("Restore vp status failed\n");
djm@6458 231
djm@6458 232 ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
djm@6458 233 ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
djm@6458 234 ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
djm@6458 235 ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
djm@6458 236 ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
djm@6458 237 ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
djm@6458 238 ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
djm@6458 239 ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
djm@6458 240 /* The guest vTLB need not be switched explicitly, since it is
djm@6458 241 * anchored in the vcpu */
djm@6458 242 }
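/*
 * A minimal sketch of how the two halves pair up around a world switch
 * (caller shape assumed; the actual call site is outside this file):
 *
 *     if (VMX_DOMAIN(prev))
 *         vmx_save_state(prev);   // PAL vp_save + stash ar.k0-ar.k7
 *     ...switch address space and stacks...
 *     if (VMX_DOMAIN(next))
 *         vmx_load_state(next);   // PAL vp_restore + reload ar.k0-ar.k7
 */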
djm@6458 243
djm@6458 244 /*
djm@6458 245 * Initialize the VMX environment for the guest. Only the 1st vp/vcpu
djm@6458 246 * is registered here.
djm@6458 247 */
djm@6458 248 void
kaf24@7720 249 vmx_final_setup_guest(struct vcpu *v)
djm@6458 250 {
djm@6458 251 vpd_t *vpd;
djm@6458 252
djm@6458 253 /* Allocate resources for vcpu 0 */
djm@6458 254 //memset(&v->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
djm@6458 255
djm@6458 256 vpd = alloc_vpd();
djm@6458 257 ASSERT(vpd);
djm@6458 258
kaf24@7720 259 v->arch.privregs = vpd;
djm@6458 260 vpd->virt_env_vaddr = vm_buffer;
djm@6458 261
djm@6878 262 /* Per-domain vTLB and VHPT implementation. For now a vmx domain always
djm@6878 263 * sticks to this solution. Maybe it can be deferred until we know the
djm@6878 264 * created domain is a vmx one. */
djm@6878 265 v->arch.vtlb = init_domain_tlb(v);
djm@6878 266
djm@6458 267 /* v->arch.schedule_tail = arch_vmx_do_launch; */
djm@6458 268 vmx_create_vp(v);
djm@6458 269
djm@6458 270 /* Set this vcpu to be vmx */
djm@6458 271 set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);
djm@6458 272
djm@6458 273 /* Physical mode emulation initialization, including
djm@6458 274 * emulation ID allocation and related memory requests
djm@6458 275 */
djm@6458 276 physical_mode_init(v);
djm@6458 277
djm@6458 278 vlsapic_reset(v);
djm@6458 279 vtm_init(v);
djm@6458 280
kaf24@7720 281 /* One more step to enable interrupt assist */
kaf24@7720 282 set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
djm@6458 283 }
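/*
 * Usage sketch (call-site shape and flag name assumed, not shown in this
 * file): arch domain-creation code calls this once, for vcpu 0 of a
 * domain configured as a VTi guest:
 *
 *     if (c->flags & VGCF_VMX_GUEST)   // flag name as assumed here
 *         vmx_final_setup_guest(v);
 */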
djm@6799 284
djm@6799 285 typedef struct io_range {
djm@6799 286 unsigned long start;
djm@6799 287 unsigned long size;
djm@6799 288 unsigned long type;
djm@6799 289 } io_range_t;
djm@6799 290
djm@6799 291 io_range_t io_ranges[] = {
djm@6799 292 {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
djm@6799 293 {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
djm@6799 294 {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
djm@6799 295 {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
djm@6799 296 {PIB_START, PIB_SIZE, GPFN_PIB},
djm@6799 297 };
djm@6799 298
djm@7333 299 #define VMX_SYS_PAGES (2 + (GFW_SIZE >> PAGE_SHIFT))
djm@6799 300 #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
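/*
 * Worked example with assumed numbers (16KB pages, 16MB guest firmware):
 * GFW_SIZE >> PAGE_SHIFT = 1024, so VMX_SYS_PAGES = 2 + 1024 = 1026 --
 * the shared I/O page, the xenstore page, and the firmware pages.
 * VMX_CONFIG_PAGES(d) is then whatever remains of d->max_pages for RAM.
 */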
djm@6799 301
djm@6799 302 int vmx_alloc_contig_pages(struct domain *d)
djm@6799 303 {
djm@7333 304 unsigned int order;
djm@7333 305 unsigned long i, j, start, end, pgnr, conf_nr;
kaf24@8726 306 struct page_info *page;
djm@6799 307 struct vcpu *v = d->vcpu[0];
djm@6799 308
djm@6799 309 ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
djm@6799 310
djm@7333 311 /* Mark I/O ranges */
djm@7333 312 for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
djm@7333 313 for (j = io_ranges[i].start;
djm@7333 314 j < io_ranges[i].start + io_ranges[i].size;
djm@7333 315 j += PAGE_SIZE)
awilliam@8778 316 assign_domain_page(d, j, io_ranges[i].type);
djm@7333 317 }
djm@7333 318
djm@6799 319 conf_nr = VMX_CONFIG_PAGES(d);
djm@6799 320 order = get_order_from_pages(conf_nr);
djm@6799 321 if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
djm@6799 322 printk("Could not allocate order=%d pages for vmx contig alloc\n",
djm@6799 323 order);
djm@6799 324 return -1;
djm@6799 325 }
djm@6799 326
djm@6799 327 /* Map normal memory below 3G */
kaf24@8726 328 pgnr = page_to_mfn(page);
djm@6799 329 end = conf_nr << PAGE_SHIFT;
djm@6799 330 for (i = 0;
djm@6799 331 i < (end < MMIO_START ? end : MMIO_START);
djm@6799 332 i += PAGE_SIZE, pgnr++)
awilliam@8778 333 assign_domain_page(d, i, pgnr << PAGE_SHIFT);
djm@6799 334
djm@6799 335 /* Map normal memory beyond 4G */
djm@6799 336 if (unlikely(end > MMIO_START)) {
djm@6799 337 start = 4 * MEM_G;
djm@6799 338 end = start + (end - 3 * MEM_G);
djm@6799 339 for (i = start; i < end; i += PAGE_SIZE, pgnr++)
awilliam@8778 340 assign_domain_page(d, i, pgnr << PAGE_SHIFT);
djm@6799 341 }
djm@6799 342
djm@6799 343 d->arch.max_pfn = end >> PAGE_SHIFT;
djm@6799 344
djm@7333 345 order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
djm@6799 346 if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
djm@6799 347 printk("Could not allocate order=%d pages for vmx contig alloc\n",
djm@6799 348 order);
djm@6799 349 return -1;
djm@6799 350 }
djm@6799 351
djm@7333 352 /* Map guest firmware */
kaf24@8726 353 pgnr = page_to_mfn(page);
djm@7333 354 for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
awilliam@8778 355 assign_domain_page(d, i, pgnr << PAGE_SHIFT);
djm@7333 356
djm@7333 357 if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
djm@7333 358 printk("Could not allocate order=1 pages for vmx contig alloc\n");
djm@7333 359 return -1;
djm@7333 360 }
djm@7333 361
djm@6799 362 /* Map for shared I/O page and xenstore */
kaf24@8726 363 pgnr = page_to_mfn(page);
awilliam@8778 364 assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
djm@6799 365 pgnr++;
awilliam@8778 366 assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
djm@6799 367
djm@6799 368 set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
djm@6799 369 return 0;
djm@6799 370 }
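/*
 * Resulting guest physical layout (sketch; symbols as used above,
 * with end = conf_nr << PAGE_SHIFT):
 *
 *     [0, min(end, MMIO_START))          normal RAM
 *     [MMIO_START, ...]                  I/O ranges marked via io_ranges[]
 *     [4G, 4G + (end - 3G))              RAM overflowing the MMIO hole
 *     [GFW_START, GFW_START + GFW_SIZE)  guest firmware
 *     IO_PAGE_START, STORE_PAGE_START    shared I/O page and xenstore page
 */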
djm@7333 371
kaf24@7720 372 void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
djm@7333 373 {
djm@7333 374 shared_iopage_t *sp;
djm@7333 375
djm@7333 376 ASSERT(d != dom0); /* only for non-privileged vti domain */
djm@7333 377 d->arch.vmx_platform.shared_page_va =
awilliam@8916 378 (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
djm@7333 379 sp = get_sp(d);
djm@7333 380 //memset((char *)sp,0,PAGE_SIZE);
djm@7333 381 /* TEMP */
djm@7333 382 d->arch.vmx_platform.pib_base = 0xfee00000UL;
djm@7333 383
djm@7333 384 /* Only open one port for I/O and interrupt emulation */
kaf24@7720 385 memset(&d->shared_info->evtchn_mask[0], 0xff,
kaf24@7720 386 sizeof(d->shared_info->evtchn_mask));
kaf24@7720 387 clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
djm@7333 388
kaf24@7720 389 /* Initialize the virtual interrupt lines */
kaf24@7720 390 vmx_virq_line_init(d);
djm@7333 391
kaf24@7720 392 /* Initialize iosapic model within hypervisor */
kaf24@8708 393 hvm_vioapic_init(d);
djm@7333 394 }
djm@7333 395
djm@7333 396