From: Paul Durrant Date: Tue, 20 Mar 2018 18:05:24 +0000 (+0000) Subject: x86/hvm: re-structure some of the ioreq server look-up loops X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=603c24dcfff36cbd9fee6a7dab0160eac5753b08;p=people%2Fiwj%2Fxen.git x86/hvm: re-structure some of the ioreq server look-up loops This patch is a cosmetic re-structuring of some of the loops which look up an ioreq server based on target domain and server id. The restructuring is done separately here to ease review of a subsequent patch. Signed-off-by: Paul Durrant Reviewed-by: Jan Beulich Reviewed-by: Andrew Cooper --- diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c index 154f6f1a32..fecabb96a9 100644 --- a/xen/arch/x86/hvm/ioreq.c +++ b/xen/arch/x86/hvm/ioreq.c @@ -835,37 +835,37 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id, &d->arch.hvm_domain.ioreq_server.list, list_entry ) { + struct rangeset *r; + if ( s == d->arch.hvm_domain.default_ioreq_server ) continue; - if ( s->id == id ) - { - struct rangeset *r; - - switch ( type ) - { - case XEN_DMOP_IO_RANGE_PORT: - case XEN_DMOP_IO_RANGE_MEMORY: - case XEN_DMOP_IO_RANGE_PCI: - r = s->range[type]; - break; + if ( s->id != id ) + continue; - default: - r = NULL; - break; - } + switch ( type ) + { + case XEN_DMOP_IO_RANGE_PORT: + case XEN_DMOP_IO_RANGE_MEMORY: + case XEN_DMOP_IO_RANGE_PCI: + r = s->range[type]; + break; - rc = -EINVAL; - if ( !r ) - break; + default: + r = NULL; + break; + } - rc = -EEXIST; - if ( rangeset_overlaps_range(r, start, end) ) - break; + rc = -EINVAL; + if ( !r ) + break; - rc = rangeset_add_range(r, start, end); + rc = -EEXIST; + if ( rangeset_overlaps_range(r, start, end) ) break; - } + + rc = rangeset_add_range(r, start, end); + break; } spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock); @@ -890,37 +890,37 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id, &d->arch.hvm_domain.ioreq_server.list, list_entry ) { + 
struct rangeset *r; + if ( s == d->arch.hvm_domain.default_ioreq_server ) continue; - if ( s->id == id ) - { - struct rangeset *r; - - switch ( type ) - { - case XEN_DMOP_IO_RANGE_PORT: - case XEN_DMOP_IO_RANGE_MEMORY: - case XEN_DMOP_IO_RANGE_PCI: - r = s->range[type]; - break; + if ( s->id != id ) + continue; - default: - r = NULL; - break; - } + switch ( type ) + { + case XEN_DMOP_IO_RANGE_PORT: + case XEN_DMOP_IO_RANGE_MEMORY: + case XEN_DMOP_IO_RANGE_PCI: + r = s->range[type]; + break; - rc = -EINVAL; - if ( !r ) - break; + default: + r = NULL; + break; + } - rc = -ENOENT; - if ( !rangeset_contains_range(r, start, end) ) - break; + rc = -EINVAL; + if ( !r ) + break; - rc = rangeset_remove_range(r, start, end); + rc = -ENOENT; + if ( !rangeset_contains_range(r, start, end) ) break; - } + + rc = rangeset_remove_range(r, start, end); + break; } spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock); @@ -958,11 +958,11 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id, if ( s == d->arch.hvm_domain.default_ioreq_server ) continue; - if ( s->id == id ) - { - rc = p2m_set_ioreq_server(d, flags, s); - break; - } + if ( s->id != id ) + continue; + + rc = p2m_set_ioreq_server(d, flags, s); + break; } spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);