void hvm_do_resume(struct vcpu *v)
{
- ioreq_t *p;
+ ioreq_t *p = get_ioreq(v);
check_wakeup_from_wait();
pt_restore_timer(v);
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
- if ( !(p = get_ioreq(v)) )
- goto check_inject_trap;
-
- while ( p->state != STATE_IOREQ_NONE )
+ if ( p )
{
- switch ( p->state )
+ while ( p->state != STATE_IOREQ_NONE )
{
- case STATE_IORESP_READY: /* IORESP_READY -> NONE */
- hvm_io_assist(p);
- break;
- case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
- case STATE_IOREQ_INPROCESS:
- wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
- (p->state != STATE_IOREQ_READY) &&
- (p->state != STATE_IOREQ_INPROCESS));
- break;
- default:
- gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
- domain_crash(v->domain);
- return; /* bail */
+ switch ( p->state )
+ {
+ case STATE_IORESP_READY: /* IORESP_READY -> NONE */
+ hvm_io_assist(p);
+ break;
+ case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
+ case STATE_IOREQ_INPROCESS:
+ wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
+ (p->state != STATE_IOREQ_READY) &&
+ (p->state != STATE_IOREQ_INPROCESS));
+ break;
+ default:
+ gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
+ domain_crash(v->domain);
+ return; /* bail */
+ }
}
}
- check_inject_trap:
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
    hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
    v->arch.hvm_vcpu.inject_trap.vector = -1;
}
}
-static void hvm_destroy_ioreq_page(
+static void hvm_unmap_ioreq_page(
struct domain *d, struct hvm_ioreq_page *iorp)
{
spin_lock(&iorp->lock);
return 0;
}
-static int hvm_set_ioreq_page(
+static int hvm_map_ioreq_page(
struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
{
struct page_info *page;
if ( hvm_funcs.nhvm_domain_relinquish_resources )
hvm_funcs.nhvm_domain_relinquish_resources(d);
- hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
- hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
+ hvm_unmap_ioreq_page(d, &d->arch.hvm_domain.ioreq);
+ hvm_unmap_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
msixtbl_pt_cleanup(d);
}
}
-bool_t hvm_send_assist_req(struct vcpu *v)
+bool_t hvm_send_assist_req(void)
{
- ioreq_t *p;
+ struct vcpu *v = current;
+ ioreq_t *p = get_ioreq(v);
if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
return 0; /* implicitly bins the i/o operation */
- if ( !(p = get_ioreq(v)) )
+ if ( !p )
return 0;
if ( unlikely(p->state != STATE_IOREQ_NONE) )
{
case HVM_PARAM_IOREQ_PFN:
iorp = &d->arch.hvm_domain.ioreq;
- if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 )
+ if ( (rc = hvm_map_ioreq_page(d, iorp, a.value)) != 0 )
break;
spin_lock(&iorp->lock);
if ( iorp->va != NULL )
    /* Initialise evtchn port info if VCPUs already created. */
    for_each_vcpu ( d, v )
        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
spin_unlock(&iorp->lock);
break;
- case HVM_PARAM_BUFIOREQ_PFN:
+ case HVM_PARAM_BUFIOREQ_PFN:
iorp = &d->arch.hvm_domain.buf_ioreq;
- rc = hvm_set_ioreq_page(d, iorp, a.value);
+ rc = hvm_map_ioreq_page(d, iorp, a.value);
break;
case HVM_PARAM_CALLBACK_IRQ:
hvm_set_callback_via(d, a.value);
int hvm_buffered_io_send(ioreq_t *p)
{
struct vcpu *v = current;
- struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
+ struct domain *d = v->domain;
+ struct hvm_ioreq_page *iorp = &d->arch.hvm_domain.buf_ioreq;
buffered_iopage_t *pg = iorp->va;
- buf_ioreq_t bp;
+ buf_ioreq_t bp = { .data = p->data,
+ .addr = p->addr,
+ .type = p->type,
+ .dir = p->dir };
/* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
int qw = 0;
if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
return 0;
- bp.type = p->type;
- bp.dir = p->dir;
switch ( p->size )
{
case 1:
return 0;
}
- bp.data = p->data;
- bp.addr = p->addr;
-
spin_lock(&iorp->lock);
if ( (pg->write_pointer - pg->read_pointer) >=
     (IOREQ_BUFFER_SLOT_NUM - qw) )
{
    /* The queue is full: send the iopacket through the normal path. */
    spin_unlock(&iorp->lock);
    return 0;
}
- memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
- &bp, sizeof(bp));
+ pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
if ( qw )
{
bp.data = p->data >> 32;
- memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
- &bp, sizeof(bp));
+ pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM] = bp;
}
/* Make the ioreq_t visible /before/ write_pointer. */
wmb();
pg->write_pointer += qw ? 2 : 1;
- notify_via_xen_event_channel(v->domain,
- v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
+ notify_via_xen_event_channel(d,
+ d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
spin_unlock(&iorp->lock);
return 1;
void send_timeoffset_req(unsigned long timeoff)
{
- ioreq_t p[1];
+ ioreq_t p = {
+ .type = IOREQ_TYPE_TIMEOFFSET,
+ .size = 8,
+ .count = 1,
+ .dir = IOREQ_WRITE,
+ .data = timeoff,
+ .state = STATE_IOREQ_READY,
+ };
if ( timeoff == 0 )
return;
- memset(p, 0, sizeof(*p));
-
- p->type = IOREQ_TYPE_TIMEOFFSET;
- p->size = 8;
- p->count = 1;
- p->dir = IOREQ_WRITE;
- p->data = timeoff;
-
- p->state = STATE_IOREQ_READY;
-
- if ( !hvm_buffered_io_send(p) )
+ if ( !hvm_buffered_io_send(&p) )
printk("Unsuccessful timeoffset update\n");
}
p->dir = IOREQ_WRITE;
p->data = ~0UL; /* flush all */
- (void)hvm_send_assist_req(v);
+ (void)hvm_send_assist_req();
}
int handle_mmio(void)