/* On non-NULL return, we leave this function holding an additional
* ref on the underlying mfn, if any */
-static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable,
- bool_t permanent)
+static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
+ bool_t *writable)
{
void *map;
p2m_type_t p2mt;
[...]
}
if ( writable )
- paging_mark_dirty(d, page_to_mfn(page));
+ {
+ if ( !p2m_is_discard_write(p2mt) )
+ paging_mark_dirty(d, page_to_mfn(page));
+ else
+ *writable = 0;
+ }
if ( !permanent )
return __map_domain_page(page);
[...]
return map;
}
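
The renamed helper inverts its contract: instead of taking a writable
boolean, _hvm_map_guest_frame() takes a pointer. NULL requests a
read-only mapping; non-NULL requests a writable one and, on return,
reports whether writes will actually take effect. Only frames that
really will be written are marked dirty. A minimal standalone sketch of
that in/out-parameter pattern follows; map_frame, ptype and RAM_RO are
invented for illustration and are not Xen names:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative stand-ins -- not Xen's types. */
    typedef enum { RAM_RW, RAM_RO /* writes silently discarded */ } ptype;

    /* Hypothetical helper mirroring _hvm_map_guest_frame()'s contract:
     * 'writable' is an in/out parameter.  NULL asks for a read-only
     * mapping; non-NULL asks for a writable one and, on return, says
     * whether writes will actually reach the frame. */
    void *map_frame(void *frame, ptype t, bool *writable)
    {
        if ( writable != NULL )
        {
            if ( t != RAM_RO )
            {
                /* the frame really gets written: this is where the
                 * real helper calls paging_mark_dirty() */
            }
            else
                *writable = false;  /* map succeeds, writes are discarded */
        }
        return frame;   /* stands in for mapping the underlying page */
    }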
-void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent)
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
+ bool_t *writable)
{
- return __hvm_map_guest_frame(gfn, 1, permanent);
+ *writable = 1;
+ return _hvm_map_guest_frame(gfn, permanent, writable);
}
void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
{
- return __hvm_map_guest_frame(gfn, 0, permanent);
+ return _hvm_map_guest_frame(gfn, permanent, NULL);
}
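
The two wrappers encode that contract at the API boundary:
hvm_map_guest_frame_rw() demands the out-flag and optimistically
initializes it to 1, so callers must re-check it after a successful
map; hvm_map_guest_frame_ro() passes NULL, keeping the read-only path
free of the new logic. Caller-side usage, continuing the illustrative
sketch above:

    /* Continues the map_frame() sketch above (illustrative only). */
    static char frame[4096];

    int demo(void)
    {
        bool writable = true;       /* what hvm_map_guest_frame_rw() does */
        void *m = map_frame(frame, RAM_RO, &writable);

        if ( m == NULL )
            return -1;              /* mapping failed outright */
        if ( !writable )
            return -2;              /* mapped, but writes would be lost */
        return 0;
    }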
void hvm_unmap_guest_frame(void *p, bool_t permanent)
[...]
put_page(mfn_to_page(mfn));
}
[...]
-static void *hvm_map_entry(unsigned long va)
+static void *hvm_map_entry(unsigned long va, bool_t *writable)
{
unsigned long gfn;
uint32_t pfec;
[...]
if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
goto fail;
- v = hvm_map_guest_frame_rw(gfn, 0);
+ v = hvm_map_guest_frame_rw(gfn, 0, writable);
if ( v == NULL )
goto fail;
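
hvm_map_entry() is the descriptor-table access path; it now simply
forwards the caller's writable flag into hvm_map_guest_frame_rw(). The
point: a guest may legitimately place its GDT or LDT in read-only
(write-discard) memory, so the mapping itself must still succeed for
descriptor reads, while the callers below learn that their write-backs
(the Accessed and Busy bit updates) have to be suppressed.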
struct segment_register desctab, cs, segr;
struct desc_struct *pdesc, desc;
u8 dpl, rpl, cpl;
+ bool_t writable;
int fault_type = TRAP_invalid_tss;
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct vcpu *v = current;
[...]
if ( ((sel & 0xfff8) + 7) > desctab.limit )
goto fail;
- pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8));
+ pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8), &writable);
if ( pdesc == NULL )
goto hvm_map_fail;
[...]
break;
}
} while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
+ writable && /* except if we are to discard writes */
(cmpxchg(&pdesc->b, desc.b, desc.b | 0x100) != desc.b) );
/* Force the Accessed flag in our local copy. */
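
This loop is the first such write-back: 0x100 in desc.b is the
descriptor's Accessed bit (bit 40 of the 8-byte descriptor, i.e. bit 8
of its high dword), and the cmpxchg() retries whenever the guest
modified the descriptor concurrently. The new writable term matters
because the hypervisor's own mapping of the mfn is always writable:
without the guard, the update would reach a frame whose guest-visible
semantics say writes are discarded. A self-contained C11 rendering of
the same loop shape, assuming a plain atomic word instead of Xen's
descriptor and cmpxchg():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Set the Accessed-style flag with a CAS, retrying if the word
     * changed under us, and skip the write entirely when writes must
     * not reach the memory (the write-discard case). */
    void set_accessed(_Atomic uint32_t *descb, bool writable)
    {
        uint32_t old = atomic_load(descb);

        while ( !(old & 0x100) &&   /* Accessed bit not yet set */
                writable &&         /* writes may reach the frame */
                !atomic_compare_exchange_weak(descb, &old, old | 0x100) )
            ;   /* failed CAS reloaded 'old'; re-check and retry */
    }

Note the context line just above: the local copy still gets the flag
forced, so the emulation proceeds consistently even when the in-memory
update was skipped.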
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct segment_register gdt, tr, prev_tr, segr;
struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
+ bool_t otd_writable, ntd_writable;
unsigned long eflags;
int exn_raised, rc;
struct {
[...]
goto out;
}
- optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8));
+ optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8),
+ &otd_writable);
if ( optss_desc == NULL )
goto out;
- nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8));
+ nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8), &ntd_writable);
if ( nptss_desc == NULL )
goto out;
[...]
v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
hvm_update_guest_cr(v, 0);
- if ( (taskswitch_reason == TSW_iret) ||
- (taskswitch_reason == TSW_jmp) )
+ if ( (taskswitch_reason == TSW_iret ||
+ taskswitch_reason == TSW_jmp) && otd_writable )
clear_bit(41, optss_desc); /* clear B flag of old task */
- if ( taskswitch_reason != TSW_iret )
+ if ( taskswitch_reason != TSW_iret && ntd_writable )
set_bit(41, nptss_desc); /* set B flag of new task */
if ( errcode >= 0 )
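
Same idea for the TSS Busy flag: bit 41 of a descriptor is bit 1 of
the type field in its access byte (byte 5), which for a TSS descriptor
distinguishes "available" (type 1001b) from "busy" (1011b). Both
guest-visible updates are now gated on the respective mapping being
writable, so a TSS descriptor in write-discard memory simply keeps its
current B flag, exactly as if the guest's own write had been discarded.
The bit arithmetic, with illustrative types rather than Xen's:

    #include <stdint.h>

    /* An 8-byte descriptor as two dwords, low ('a') and high ('b').
     * Bit 41 of the descriptor == bit 41 - 32 = 9 of the high dword. */
    struct desc { uint32_t a, b; };

    static inline void set_busy(struct desc *d)   { d->b |=  1u << 9; }
    static inline void clear_busy(struct desc *d) { d->b &= ~(1u << 9); }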
if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
{
- nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
- if ( nvcpu->nv_vvmcx )
- nvcpu->nv_vvmcxaddr = gpa;
- if ( !nvcpu->nv_vvmcx ||
+ bool_t writable;
+ void *vvmcx = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 1, &writable);
+
+ if ( vvmcx )
+ {
+ if ( writable )
+ {
+ nvcpu->nv_vvmcx = vvmcx;
+ nvcpu->nv_vvmcxaddr = gpa;
+ }
+ else
+ {
+ hvm_unmap_guest_frame(vvmcx, 1);
+ vvmcx = NULL;
+ }
+ }
+ if ( !vvmcx ||
!map_io_bitmap_all(v) ||
!_map_msr_bitmap(v) )
{
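
For VMPTRLD the requirement is stricter: the hypervisor must be able
to write the virtual VMCS on the guest's behalf, so a mapping whose
writes would be discarded is not merely degraded but unusable. The
rewrite therefore commits the mapping only when it is writable, and
otherwise unmaps it again and lets the pre-existing !vvmcx failure
path fire. (paddr_to_pfn(gpa) replaces the open-coded
gpa >> PAGE_SHIFT: same value, clearer intent.) The commit-or-undo
shape, with hypothetical map_frame()/unmap_frame() helpers standing in
for the Xen calls (signatures simplified):

    #include <stdbool.h>
    #include <stddef.h>

    extern void *map_frame(void *frame, int type, bool *writable);
    extern void unmap_frame(void *m);

    void *load_control_block(void *frame, int type)
    {
        bool writable = true;
        void *m = map_frame(frame, type, &writable);

        if ( m != NULL && !writable )
        {
            unmap_frame(m); /* a discard-write mapping is useless here */
            m = NULL;       /* fall into the ordinary failure path */
        }
        return m;
    }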
if ( rc != X86EMUL_OKAY )
return rc;
+ BUILD_BUG_ON(X86EMUL_OKAY != VMSUCCEED); /* rc = VMSUCCEED; */
if ( gpa & 0xfff )
- {
- vmreturn(regs, VMFAIL_INVALID);
- return X86EMUL_OKAY;
- }
-
- if ( gpa == nvcpu->nv_vvmcxaddr )
+ rc = VMFAIL_INVALID;
+ else if ( gpa == nvcpu->nv_vvmcxaddr )
{
if ( cpu_has_vmx_vmcs_shadowing )
nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx);
else
{
/* Even if this VMCS isn't the current one, we must clear it. */
- vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
+ bool_t writable;
+
+ vvmcs = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 0, &writable);
if ( vvmcs )
- clear_vvmcs_launched(&nvmx->launched_list,
- domain_page_map_to_mfn(vvmcs));
- hvm_unmap_guest_frame(vvmcs, 0);
+ {
+ if ( writable )
+ clear_vvmcs_launched(&nvmx->launched_list,
+ domain_page_map_to_mfn(vvmcs));
+ else
+ rc = VMFAIL_VALID;
+ hvm_unmap_guest_frame(vvmcs, 0);
+ }
}
- vmreturn(regs, VMSUCCEED);
+ vmreturn(regs, rc);
+
return X86EMUL_OKAY;
}
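
In VMCLEAR the error reporting is restructured around a single rc: it
enters as X86EMUL_OKAY, may be downgraded to VMFAIL_INVALID (misaligned
address) or VMFAIL_VALID (the VMCS frame is mapped but not writable, so
its launch state cannot be cleared), and one vmreturn(regs, rc) call
reports the outcome. The function still returns X86EMUL_OKAY in the
failure cases: the emulation itself succeeded, only the virtual VMX
operation failed. Reusing rc across the two value spaces is safe only
because X86EMUL_OKAY and VMSUCCEED are the same value, which the
BUILD_BUG_ON pins down at compile time. One classic definition of such
an assertion (Xen's real macro lives in its headers; this is the widely
used negative-array-size idiom, shown for illustration):

    /* Fails to compile iff cond is true: a negative array size is a
     * constraint violation the compiler must diagnose. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))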