case HVM_PARAM_CONSOLE_EVTCHN:
case HVM_PARAM_X87_FIP_WIDTH:
break;
+ /* The following parameters are deprecated. */
+ case HVM_PARAM_DM_DOMAIN:
+ case HVM_PARAM_BUFIOREQ_EVTCHN:
+ rc = -EPERM;
+ break;
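Note for toolstack and emulator authors: after this hunk, a set of either deprecated parameter fails unconditionally. A minimal probe, assuming libxenctrl's xc_hvm_param_set() and a hypothetical test domid (error reporting is illustrative only):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint32_t domid = 1; /* hypothetical test domain */

        if ( !xch )
            return 1;

        /* Expected to fail with EPERM once this patch is applied. */
        if ( xc_hvm_param_set(xch, domid, HVM_PARAM_DM_DOMAIN, 0) < 0 )
            printf("set HVM_PARAM_DM_DOMAIN failed: %s\n", strerror(errno));

        xc_interface_close(xch);
        return 0;
    }
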
/*
* The following parameters must not be set by the guest
* since the domain may need to be paused.
*/
case HVM_PARAM_IDENT_PT:
- case HVM_PARAM_DM_DOMAIN:
case HVM_PARAM_ACPI_S_STATE:
/* The remaining parameters should not be set by the guest. */
default:
if ( d == current->domain )
rc = -EPERM;
break;

if ( a.value &&
d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
rc = -EINVAL;
break;
- case HVM_PARAM_BUFIOREQ_EVTCHN:
- rc = -EINVAL;
- break;
case HVM_PARAM_TRIPLE_FAULT_REASON:
if ( a.value > SHUTDOWN_MAX )
rc = -EINVAL;
case HVM_PARAM_ALTP2M:
case HVM_PARAM_X87_FIP_WIDTH:
break;
- /*
- * The following parameters must not be read by the guest
- * since the domain may need to be paused.
- */
- case HVM_PARAM_IOREQ_PFN:
- case HVM_PARAM_BUFIOREQ_PFN:
+ /* The following parameters are deprecated. */
+ case HVM_PARAM_DM_DOMAIN:
case HVM_PARAM_BUFIOREQ_EVTCHN:
+ rc = -ENODATA;
+ break;
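The read side is symmetric: the value is no longer maintained by Xen, so the get path reports ENODATA rather than handing back stale state. A sketch using libxenctrl's xc_hvm_param_get(), with includes as in the previous example and hypothetical names:

    static int read_bufioreq_evtchn(xc_interface *xch, uint32_t domid)
    {
        uint64_t port;
        int rc = xc_hvm_param_get(xch, domid, HVM_PARAM_BUFIOREQ_EVTCHN,
                                  &port);

        if ( rc < 0 ) /* expect ENODATA after this patch */
            fprintf(stderr, "param read failed: %s\n", strerror(errno));

        return rc;
    }
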
/* The remaining parameters should not be read by the guest. */
default:
if ( d == current->domain )
rc = -EPERM;
break;
case HVM_PARAM_X87_FIP_WIDTH:
a.value = d->arch.x87_fip_width;
break;
- case HVM_PARAM_IOREQ_PFN:
- case HVM_PARAM_BUFIOREQ_PFN:
- case HVM_PARAM_BUFIOREQ_EVTCHN:
- /*
- * It may be necessary to create a default ioreq server here,
- * because legacy versions of QEMU are not aware of the new API for
- * explicit ioreq server creation. However, if the domain is not
- * under construction then it will not be QEMU querying the
- * parameters and thus the query should not have that side-effect.
- */
- if ( !d->creation_finished )
- {
- rc = hvm_create_ioreq_server(d, true,
- HVM_IOREQSRV_BUFIOREQ_LEGACY, NULL);
- if ( rc != 0 && rc != -EEXIST )
- goto out;
- }
-
- /*FALLTHRU*/
default:
a.value = d->arch.hvm.params[a.index];
break;
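With the lazy creation gone, nothing in the get path instantiates an ioreq server any more; an emulator must create one explicitly before asking for its resources. A minimal sketch of the explicit flow, assuming libxenctrl (the wrapper name and error handling are illustrative; HVM_IOREQSRV_BUFIOREQ_ATOMIC comes from the Xen public headers):

    #include <xenctrl.h>

    /* Returns 0 and fills *id on success; assumes xch/domid are valid. */
    static int create_server(xc_interface *xch, uint32_t domid,
                             ioservid_t *id)
    {
        /* Ask for atomic buffered-ioreq handling rather than the
         * legacy (non-atomic) variant the default server used. */
        return xc_hvm_create_ioreq_server(xch, domid,
                                          HVM_IOREQSRV_BUFIOREQ_ATOMIC, id);
    }
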
return GET_IOREQ_SERVER(d, id);
}
-#define IS_DEFAULT(s) \
- ((s) && (s) == GET_IOREQ_SERVER((s)->target, DEFAULT_IOSERVID))
-
/*
* Iterate over all possible ioreq servers.
*
struct domain *d = s->target;
unsigned int i;
- ASSERT(!IS_DEFAULT(s));
-
for ( i = 0; i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8; i++ )
{
if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.mask) )
struct domain *d = s->target;
unsigned int i = gfn_x(gfn) - d->arch.hvm.ioreq_gfn.base;
- ASSERT(!IS_DEFAULT(s));
ASSERT(!gfn_eq(gfn, INVALID_GFN));
set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
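The two hunks above are the whole GFN allocator: a base GFN plus a bitmap in which a set bit means "page free"; allocation scans with test_and_clear_bit() and freeing sets the bit back. A standalone sketch of the same scheme (all names hypothetical; single-threaded for brevity, so plain bit ops stand in for Xen's atomic ones):

    #include <stdint.h>
    #include <stdio.h>

    #define POOL_BASE  0xff000u
    #define POOL_SLOTS 64

    static uint64_t pool_mask = ~0ull; /* all slots free initially */

    static long alloc_gfn(void)
    {
        for ( unsigned int i = 0; i < POOL_SLOTS; i++ )
            if ( pool_mask & (1ull << i) ) /* test_and_clear_bit analogue */
            {
                pool_mask &= ~(1ull << i);
                return POOL_BASE + i;
            }
        return -1; /* INVALID_GFN analogue */
    }

    static void free_gfn(long gfn)
    {
        pool_mask |= 1ull << (gfn - POOL_BASE); /* set_bit analogue */
    }

    int main(void)
    {
        long a = alloc_gfn(), b = alloc_gfn();
        printf("allocated %#lx and %#lx\n", a, b);
        free_gfn(a);
        printf("reallocated %#lx\n", alloc_gfn()); /* reuses a's slot */
        return 0;
    }
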
destroy_ring_for_helper(&iorp->va, iorp->page);
iorp->page = NULL;
- if ( !IS_DEFAULT(s) )
- hvm_free_ioreq_gfn(s, iorp->gfn);
-
+ hvm_free_ioreq_gfn(s, iorp->gfn);
iorp->gfn = INVALID_GFN;
}
if ( d->is_dying )
return -EINVAL;
- if ( IS_DEFAULT(s) )
- iorp->gfn = _gfn(buf ?
- d->arch.hvm.params[HVM_PARAM_BUFIOREQ_PFN] :
- d->arch.hvm.params[HVM_PARAM_IOREQ_PFN]);
- else
- iorp->gfn = hvm_alloc_ioreq_gfn(s);
+ iorp->gfn = hvm_alloc_ioreq_gfn(s);
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return -ENOMEM;
struct domain *d = s->target;
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return;
if ( guest_physmap_remove_page(d, iorp->gfn,
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
- if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return 0;
clear_page(iorp->va);
if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
{
- struct domain *d = s->target;
-
rc = alloc_unbound_xen_event_channel(v->domain, 0,
s->emulator->domain_id, NULL);
if ( rc < 0 )
goto fail3;
s->bufioreq_evtchn = rc;
- if ( IS_DEFAULT(s) )
- d->arch.hvm.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
- s->bufioreq_evtchn;
}
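Since the event channel is no longer mirrored into HVM_PARAM_BUFIOREQ_EVTCHN, an emulator now obtains it from the ioreq-server info call. A fragment continuing the earlier libxenctrl sketches (variable names hypothetical; xc_hvm_get_ioreq_server_info() is the real API):

    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;

    if ( xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                      &bufioreq_pfn, &bufioreq_port) < 0 )
        perror("xc_hvm_get_ioreq_server_info");
    /* bufioreq_port replaces the old HVM_PARAM_BUFIOREQ_EVTCHN value. */
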
sv->vcpu = v;
{
unsigned int i;
- if ( IS_DEFAULT(s) )
- return;
-
for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
rangeset_destroy(s->range[i]);
}
unsigned int i;
int rc;
- if ( id == DEFAULT_IOSERVID )
- goto done;
-
- ASSERT(!IS_DEFAULT(s));
-
for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
{
char *name;
rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
}
- done:
return 0;
fail:
s->bufioreq_handling = bufioreq_handling;
- if ( id == DEFAULT_IOSERVID )
- {
- rc = hvm_ioreq_server_map_pages(s);
- if ( rc )
- goto fail_map;
- }
-
for_each_vcpu ( d, v )
{
rc = hvm_ioreq_server_add_vcpu(s, v);
hvm_ioreq_server_remove_all_vcpus(s);
hvm_ioreq_server_unmap_pages(s);
- fail_map:
hvm_ioreq_server_free_rangesets(s);
put_domain(s->emulator);
put_domain(s->emulator);
}
-int hvm_create_ioreq_server(struct domain *d, bool is_default,
- int bufioreq_handling, ioservid_t *id)
+int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
+ ioservid_t *id)
{
struct hvm_ioreq_server *s;
unsigned int i;
domain_pause(d);
spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
- if ( is_default )
+ for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
{
- i = DEFAULT_IOSERVID;
-
- rc = -EEXIST;
- if ( GET_IOREQ_SERVER(d, i) )
- goto fail;
+ if ( !GET_IOREQ_SERVER(d, i) )
+ break;
}
- else
- {
- for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
- {
- if ( i != DEFAULT_IOSERVID && !GET_IOREQ_SERVER(d, i) )
- break;
- }
- rc = -ENOSPC;
- if ( i >= MAX_NR_IOREQ_SERVERS )
- goto fail;
- }
+ rc = -ENOSPC;
+ if ( i >= MAX_NR_IOREQ_SERVERS )
+ goto fail;
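For reference, the simplified id allocation above is just a first-free-slot scan over the now-uniform server table; there is no longer a reserved DEFAULT_IOSERVID entry to skip. A standalone sketch of the same logic (names hypothetical):

    #include <errno.h>
    #include <stddef.h>

    #define MAX_SERVERS 8

    static void *servers[MAX_SERVERS];

    static int claim_slot(void *s, unsigned int *id)
    {
        unsigned int i;

        for ( i = 0; i < MAX_SERVERS; i++ )
            if ( !servers[i] )
                break;

        if ( i >= MAX_SERVERS )
            return -ENOSPC; /* table full */

        servers[i] = s;
        *id = i;
        return 0;
    }
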
/*
* It is safe to call set_ioreq_server() prior to
* hvm_ioreq_server_init() since the target domain is paused.
- * It is necessary for the calls to be ordered thus otherwise
- * the IS_DEFAULT() macro would not evaluate correctly.
*/
set_ioreq_server(d, i, s);
goto fail;
}
- if ( i == DEFAULT_IOSERVID )
- hvm_ioreq_server_enable(s);
-
if ( id )
*id = i;
struct hvm_ioreq_server *s;
int rc;
- if ( id == DEFAULT_IOSERVID )
- return -EPERM;
-
spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
if ( !s )
goto out;
- ASSERT(!IS_DEFAULT(s));
-
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
/*
* It is safe to call hvm_ioreq_server_deinit() prior to
- * set_ioreq_server() since the target domain is paused. It is
- * necessary for the calls to be ordered thus otherwise the
- * IS_DEFAULT() macro would not evaluate correctly.
+ * set_ioreq_server() since the target domain is paused.
*/
hvm_ioreq_server_deinit(s);
set_ioreq_server(d, id, NULL);
struct hvm_ioreq_server *s;
int rc;
- if ( id == DEFAULT_IOSERVID )
- return -EOPNOTSUPP;
-
spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
if ( !s )
goto out;
- ASSERT(!IS_DEFAULT(s));
-
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
struct hvm_ioreq_server *s;
int rc;
- if ( id == DEFAULT_IOSERVID )
- return -EOPNOTSUPP;
-
if ( !is_hvm_domain(d) )
return -EINVAL;
if ( !s )
goto out;
- ASSERT(!IS_DEFAULT(s));
-
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
if ( start > end )
return -EINVAL;
- if ( id == DEFAULT_IOSERVID )
- return -EOPNOTSUPP;
-
spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
if ( !s )
goto out;
- ASSERT(!IS_DEFAULT(s));
-
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
if ( start > end )
return -EINVAL;
- if ( id == DEFAULT_IOSERVID )
- return -EOPNOTSUPP;
-
spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
if ( !s )
goto out;
- ASSERT(!IS_DEFAULT(s));
-
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
struct hvm_ioreq_server *s;
int rc;
- if ( id == DEFAULT_IOSERVID )
- return -EOPNOTSUPP;
-
if ( type != HVMMEM_ioreq_server )
return -EINVAL;
if ( !s )
goto out;
- ASSERT(!IS_DEFAULT(s));
-
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
struct hvm_ioreq_server *s;
int rc;
- if ( id == DEFAULT_IOSERVID )
- return -EOPNOTSUPP;
-
spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
if ( !s )
goto out;
- ASSERT(!IS_DEFAULT(s));
-
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
/*
* It is safe to call hvm_ioreq_server_deinit() prior to
* set_ioreq_server() since the target domain is being destroyed.
- * It is necessary for the calls to be ordered thus otherwise the
- * IS_DEFAULT() macro would not evaluate correctly.
*/
hvm_ioreq_server_deinit(s);
set_ioreq_server(d, id, NULL);
unsigned int id;
if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
- return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
+ return NULL;
cf8 = d->arch.hvm.pci_cf8;
{
struct rangeset *r;
- if ( IS_DEFAULT(s) || !s->enabled )
+ if ( !s->enabled )
continue;
r = s->range[type];
}
}
- return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
+ return NULL;
}
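Returning NULL instead of the default server changes the contract with callers: an access that no registered server claims must now be completed locally rather than forwarded to a device model. A caller-side sketch of that contract (the surrounding code is hypothetical; hvm_send_ioreq() is the real dispatch function, and unclaimed reads conventionally complete as all-ones with writes discarded):

    s = hvm_select_ioreq_server(currd, &p);
    if ( !s )
    {
        /* Nobody claimed the range: read-as-ones, writes discarded. */
        if ( p.dir == IOREQ_READ )
            p.data = ~0UL;
        rc = X86EMUL_OKAY;
    }
    else
        rc = hvm_send_ioreq(s, &p, false);
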
static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)