                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
     {
+        struct rangeset *r;
+
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
-        {
-            struct rangeset *r;
-
-            switch ( type )
-            {
-            case XEN_DMOP_IO_RANGE_PORT:
-            case XEN_DMOP_IO_RANGE_MEMORY:
-            case XEN_DMOP_IO_RANGE_PCI:
-                r = s->range[type];
-                break;
+        if ( s->id != id )
+            continue;
 
-            default:
-                r = NULL;
-                break;
-            }
+        switch ( type )
+        {
+        case XEN_DMOP_IO_RANGE_PORT:
+        case XEN_DMOP_IO_RANGE_MEMORY:
+        case XEN_DMOP_IO_RANGE_PCI:
+            r = s->range[type];
+            break;
 
-            rc = -EINVAL;
-            if ( !r )
-                break;
+        default:
+            r = NULL;
+            break;
+        }
 
-            rc = -EEXIST;
-            if ( rangeset_overlaps_range(r, start, end) )
-                break;
+        rc = -EINVAL;
+        if ( !r )
+            break;
 
-            rc = rangeset_add_range(r, start, end);
+        rc = -EEXIST;
+        if ( rangeset_overlaps_range(r, start, end) )
             break;
-        }
+
+        rc = rangeset_add_range(r, start, end);
+        break;
     }
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
     {
+        struct rangeset *r;
+
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
-        {
-            struct rangeset *r;
-
-            switch ( type )
-            {
-            case XEN_DMOP_IO_RANGE_PORT:
-            case XEN_DMOP_IO_RANGE_MEMORY:
-            case XEN_DMOP_IO_RANGE_PCI:
-                r = s->range[type];
-                break;
+        if ( s->id != id )
+            continue;
 
-            default:
-                r = NULL;
-                break;
-            }
+        switch ( type )
+        {
+        case XEN_DMOP_IO_RANGE_PORT:
+        case XEN_DMOP_IO_RANGE_MEMORY:
+        case XEN_DMOP_IO_RANGE_PCI:
+            r = s->range[type];
+            break;
 
-            rc = -EINVAL;
-            if ( !r )
-                break;
+        default:
+            r = NULL;
+            break;
+        }
 
-            rc = -ENOENT;
-            if ( !rangeset_contains_range(r, start, end) )
-                break;
+        rc = -EINVAL;
+        if ( !r )
+            break;
 
-            rc = rangeset_remove_range(r, start, end);
+        rc = -ENOENT;
+        if ( !rangeset_contains_range(r, start, end) )
             break;
-        }
+
+        rc = rangeset_remove_range(r, start, end);
+        break;
     }
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
-        {
-            rc = p2m_set_ioreq_server(d, flags, s);
-            break;
-        }
+        if ( s->id != id )
+            continue;
+
+        rc = p2m_set_ioreq_server(d, flags, s);
+        break;
     }
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);