{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+ struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
unsigned long saddr, daddr, bytes;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
}
/* Check for MMIO ops */
- (void) get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt);
- (void) get_gfn_query_unlocked(current->domain, dgpa >> PAGE_SHIFT, &dp2mt);
+ get_gfn_query_unlocked(curr->domain, sgpa >> PAGE_SHIFT, &sp2mt);
+ get_gfn_query_unlocked(curr->domain, dgpa >> PAGE_SHIFT, &dp2mt);
if ( sp2mt == p2m_mmio_direct || dp2mt == p2m_mmio_direct ||
(sp2mt == p2m_mmio_dm && dp2mt == p2m_mmio_dm) )
rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
if ( rc == HVMTRANS_okay )
- rc = hvm_copy_to_guest_phys(dgpa, buf, bytes, current);
+ rc = hvm_copy_to_guest_phys(dgpa, buf, bytes, curr);
xfree(buf);
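
Both hunks make the same change: read `current` once into a local `curr` at the top of the function and go through the cached pointer everywhere else. On x86 Xen `current` is not a plain variable but a per-CPU lookup, so hoisting it avoids repeating that access on every use. A minimal sketch of the pattern, with hypothetical `get_current()`/`use()` stand-ins rather than the real Xen definitions:

    /* Sketch only: hypothetical stand-ins, not the Xen definitions. */
    struct domain;
    struct vcpu { struct domain *domain; };

    struct vcpu *get_current(void);     /* assumed per-CPU lookup */
    #define current (get_current())

    void use(struct domain *d);

    static void uncached(void)
    {
        use(current->domain);           /* per-CPU lookup #1 */
        use(current->domain);           /* per-CPU lookup #2 */
    }

    static void cached(void)
    {
        struct vcpu *curr = current;    /* single lookup */

        use(curr->domain);
        use(curr->domain);
    }
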
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+ struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
unsigned long addr, bytes;
paddr_t gpa;
p2m_type_t p2mt;
}
/* Check for MMIO op */
- (void)get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt);
+ get_gfn_query_unlocked(curr->domain, gpa >> PAGE_SHIFT, &p2mt);
switch ( p2mt )
{
if ( df )
gpa -= bytes - bytes_per_rep;
- rc = hvm_copy_to_guest_phys(gpa, buf, bytes, current);
+ rc = hvm_copy_to_guest_phys(gpa, buf, bytes, curr);
if ( buf != p_data )
xfree(buf);
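
The `if ( df )` adjustment above rebases a descending string op: with EFLAGS.DF set the guest walks its buffer from the highest element downwards, so the translated `gpa` names the highest-addressed element, while the contiguous physical copy has to start at the lowest. Subtracting `bytes - bytes_per_rep` recovers that base. A self-contained arithmetic sketch, assuming a made-up 8-iteration REP STOSD (all values hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t paddr_t;

    int main(void)
    {
        unsigned int bytes_per_rep = 4;             /* e.g. REP STOSD */
        unsigned long reps = 8;
        unsigned long bytes = reps * bytes_per_rep; /* 32 */
        int df = 1;                                 /* EFLAGS.DF set */
        paddr_t gpa = 0x101c;                       /* highest element */

        if ( df )
            gpa -= bytes - bytes_per_rep;           /* 0x101c - 28 = 0x1000 */

        printf("copy covers %#lx..%#lx\n",
               (unsigned long)gpa, (unsigned long)(gpa + bytes - 1));
        return 0;
    }
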