#define RMAP_USES_HASHTAB(page) \
((page)->sharing->hash_table.flag == NULL)
#define RMAP_HEAVY_SHARED_PAGE RMAP_HASHTAB_SIZE
-/* A bit of hysteresis. We don't want to be mutating between list and hash
- * table constantly. */
+/*
+ * A bit of hysteresis. We don't want to be mutating between list and hash
+ * table constantly.
+ */
#define RMAP_LIGHT_SHARED_PAGE (RMAP_HEAVY_SHARED_PAGE >> 2)
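A minimal sketch, not part of the patch, of the hysteresis these two thresholds give; it mirrors the checks rmap_add() and rmap_del() make below, and the helper name is illustrative:

/* Illustrative only: which rmap layout does a given sharing count want? */
static bool rmap_wants_hashtab(unsigned long count, bool using_hashtab)
{
    if ( !using_hashtab && count >= RMAP_HEAVY_SHARED_PAGE )
        return true;            /* grow: list -> hash table */
    if ( using_hashtab && count <= RMAP_LIGHT_SHARED_PAGE )
        return false;           /* shrink: hash table -> list */
    return using_hashtab;       /* in between: keep the current layout */
}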
#if MEM_SHARING_AUDIT
xfree(container_of(head, struct page_sharing_info, rcu_head));
}
-static inline void audit_add_list(struct page_info *page)
+static void audit_add_list(struct page_info *page)
{
INIT_LIST_HEAD(&page->sharing->entry);
spin_lock(&shr_audit_lock);
}
/* Removes from the audit list and cleans up the page sharing metadata. */
-static inline void page_sharing_dispose(struct page_info *page)
+static void page_sharing_dispose(struct page_info *page)
{
/* Unlikely given our thresholds, but we should be careful. */
if ( unlikely(RMAP_USES_HASHTAB(page)) )
- free_xenheap_pages(page->sharing->hash_table.bucket,
- RMAP_HASHTAB_ORDER);
+ free_xenheap_pages(page->sharing->hash_table.bucket,
+ RMAP_HASHTAB_ORDER);
spin_lock(&shr_audit_lock);
list_del_rcu(&page->sharing->entry);
#else
#define audit_add_list(p) ((void)0)
-static inline void page_sharing_dispose(struct page_info *page)
+static void page_sharing_dispose(struct page_info *page)
{
/* Unlikely given our thresholds, but we should be careful. */
if ( unlikely(RMAP_USES_HASHTAB(page)) )
- free_xenheap_pages(page->sharing->hash_table.bucket,
- RMAP_HASHTAB_ORDER);
+ free_xenheap_pages(page->sharing->hash_table.bucket,
+ RMAP_HASHTAB_ORDER);
xfree(page->sharing);
}
* Nesting may happen when sharing (and locking) two pages.
* Deadlock is avoided by locking pages in increasing order.
* All memory sharing code paths take the p2m lock of the affected gfn before
- * taking the lock for the underlying page. We enforce ordering between page_lock
- * and p2m_lock using an mm-locks.h construct.
+ * taking the lock for the underlying page. We enforce ordering between
+ * page_lock and p2m_lock using an mm-locks.h construct.
*
* TODO: Investigate if PGT_validated is necessary.
*/
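A hedged sketch of the "increasing order" rule described above; it mirrors what share_pages() does further down when grabbing the source and client pages (smfn/cmfn and spage/cpage are the names used there):

/*
 * Illustrative only: lock the page with the lower MFN first, so two callers
 * sharing the same pair of pages in opposite source/client order cannot
 * deadlock against each other.
 */
if ( mfn_x(smfn) < mfn_x(cmfn) )
{
    spage = __grab_shared_page(smfn);   /* lower MFN locked first */
    cpage = __grab_shared_page(cmfn);
}
else
{
    cpage = __grab_shared_page(cmfn);   /* lower MFN locked first */
    spage = __grab_shared_page(smfn);
}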
-static inline bool _page_lock(struct page_info *page)
+static bool _page_lock(struct page_info *page)
{
unsigned long x, nx;
return true;
}
-static inline void _page_unlock(struct page_info *page)
+static void _page_unlock(struct page_info *page)
{
unsigned long x, nx, y = page->u.inuse.type_info;
} while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
}
-static inline bool mem_sharing_page_lock(struct page_info *pg)
+static bool mem_sharing_page_lock(struct page_info *pg)
{
bool rc;
pg_lock_data_t *pld = &(this_cpu(__pld));
if ( rc )
{
preempt_disable();
- page_sharing_mm_post_lock(&pld->mm_unlock_level,
+ page_sharing_mm_post_lock(&pld->mm_unlock_level,
&pld->recurse_count);
}
return rc;
}
-static inline void mem_sharing_page_unlock(struct page_info *pg)
+static void mem_sharing_page_unlock(struct page_info *pg)
{
pg_lock_data_t *pld = &(this_cpu(__pld));
- page_sharing_mm_unlock(pld->mm_unlock_level,
+ page_sharing_mm_unlock(pld->mm_unlock_level,
&pld->recurse_count);
preempt_enable();
_page_unlock(pg);
}
-static inline shr_handle_t get_next_handle(void)
+static shr_handle_t get_next_handle(void)
{
- /* Get the next handle get_page style */
+    /* Get the next handle, get_page style. */
uint64_t x, y = next_handle;
do {
x = y;
#define mem_sharing_enabled(d) \
(is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled)
-static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
+static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
static atomic_t nr_shared_mfns = ATOMIC_INIT(0);
-/** Reverse map **/
-/* Every shared frame keeps a reverse map (rmap) of <domain, gfn> tuples that
+/*
+ * Reverse map
+ *
+ * Every shared frame keeps a reverse map (rmap) of <domain, gfn> tuples that
* this shared frame backs. For pages with a low degree of sharing, a O(n)
* search linked list is good enough. For pages with higher degree of sharing,
- * we use a hash table instead. */
+ * we use a hash table instead.
+ */
typedef struct gfn_info
{
unsigned long gfn;
- domid_t domain;
+ domid_t domain;
struct list_head list;
} gfn_info_t;
-static inline void
-rmap_init(struct page_info *page)
+static void rmap_init(struct page_info *page)
{
/* We always start off as a doubly linked list. */
INIT_LIST_HEAD(&page->sharing->gfns);
#define HASH(domain, gfn) \
(((gfn) + (domain)) % RMAP_HASHTAB_SIZE)
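A worked example of the bucket selection, assuming for illustration that RMAP_HASHTAB_SIZE were 256 (its actual value is not shown in this hunk):

/*
 * Illustrative only: HASH(5, 0x1234) == (0x1234 + 5) % 256 == 0x39, i.e.
 * the <domain 5, gfn 0x1234> tuple lands in bucket 0x39 of
 * page->sharing->hash_table.bucket.
 */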
-/* Conversions. Tuned by the thresholds. Should only happen twice
- * (once each) during the lifetime of a shared page */
-static inline int
-rmap_list_to_hash_table(struct page_info *page)
+/*
+ * Conversions. Tuned by the thresholds. Should only happen twice
+ * (once each) during the lifetime of a shared page.
+ */
+static inline int rmap_list_to_hash_table(struct page_info *page)
{
unsigned int i;
struct list_head *pos, *tmp, *b =
for ( i = 0; i < RMAP_HASHTAB_SIZE; i++ )
INIT_LIST_HEAD(b + i);
- list_for_each_safe(pos, tmp, &page->sharing->gfns)
+ list_for_each_safe ( pos, tmp, &page->sharing->gfns )
{
gfn_info_t *gfn_info = list_entry(pos, gfn_info_t, list);
struct list_head *bucket = b + HASH(gfn_info->domain, gfn_info->gfn);
+
list_del(pos);
list_add(pos, bucket);
}
return 0;
}
-static inline void
-rmap_hash_table_to_list(struct page_info *page)
+static void rmap_hash_table_to_list(struct page_info *page)
{
unsigned int i;
struct list_head *bucket = page->sharing->hash_table.bucket;
for ( i = 0; i < RMAP_HASHTAB_SIZE; i++ )
{
struct list_head *pos, *tmp, *head = bucket + i;
- list_for_each_safe(pos, tmp, head)
+
+ list_for_each_safe ( pos, tmp, head )
{
list_del(pos);
list_add(pos, &page->sharing->gfns);
}
/* Generic accessors to the rmap */
-static inline unsigned long
-rmap_count(struct page_info *pg)
+static unsigned long rmap_count(const struct page_info *pg)
{
unsigned long count;
unsigned long t = read_atomic(&pg->u.inuse.type_info);
+
count = t & PGT_count_mask;
if ( t & PGT_locked )
count--;
return count;
}
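A brief worked example of the count computed above; the concrete numbers are illustrative, and the reasoning follows the comment in mem_sharing_lookup() below about the lock holding its own type reference:

/*
 * Illustrative only: with PGT_locked set and a type count of 7, rmap_count()
 * returns 6: the lock's own type reference is not a sharing reference, so
 * it is excluded from the estimate of the rmap's population.
 */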
-/* The page type count is always decreased after removing from the rmap.
- * Use a convert flag to avoid mutating the rmap if in the middle of an
- * iterator, or if the page will be soon destroyed anyways. */
-static inline void
-rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
+/*
+ * The page type count is always decreased after removing from the rmap.
+ * Use a convert flag to avoid mutating the rmap while in the middle of an
+ * iterator, or if the page will soon be destroyed anyway.
+ */
+static void rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
{
if ( RMAP_USES_HASHTAB(page) && convert &&
(rmap_count(page) <= RMAP_LIGHT_SHARED_PAGE) )
}
/* The page type count is always increased before adding to the rmap. */
-static inline void
-rmap_add(gfn_info_t *gfn_info, struct page_info *page)
+static void rmap_add(gfn_info_t *gfn_info, struct page_info *page)
{
struct list_head *head;
if ( !RMAP_USES_HASHTAB(page) &&
(rmap_count(page) >= RMAP_HEAVY_SHARED_PAGE) )
- /* The conversion may fail with ENOMEM. We'll be less efficient,
- * but no reason to panic. */
+ /*
+ * The conversion may fail with ENOMEM. We'll be less efficient,
+ * but no reason to panic.
+ */
(void)rmap_list_to_hash_table(page);
- head = (RMAP_USES_HASHTAB(page)) ?
- page->sharing->hash_table.bucket +
- HASH(gfn_info->domain, gfn_info->gfn) :
- &page->sharing->gfns;
+ head = (RMAP_USES_HASHTAB(page)
+ ? page->sharing->hash_table.bucket + HASH(gfn_info->domain,
+ gfn_info->gfn)
+ : &page->sharing->gfns);
INIT_LIST_HEAD(&gfn_info->list);
list_add(&gfn_info->list, head);
}
-static inline gfn_info_t *
-rmap_retrieve(uint16_t domain_id, unsigned long gfn,
- struct page_info *page)
+static gfn_info_t *rmap_retrieve(uint16_t domain_id, unsigned long gfn,
+ struct page_info *page)
{
gfn_info_t *gfn_info;
struct list_head *le, *head;
- head = (RMAP_USES_HASHTAB(page)) ?
- page->sharing->hash_table.bucket + HASH(domain_id, gfn) :
- &page->sharing->gfns;
+ head = (RMAP_USES_HASHTAB(page)
+ ? page->sharing->hash_table.bucket + HASH(domain_id, gfn)
+ : &page->sharing->gfns);
- list_for_each(le, head)
+ list_for_each ( le, head )
{
gfn_info = list_entry(le, gfn_info_t, list);
if ( (gfn_info->gfn == gfn) && (gfn_info->domain == domain_id) )
}
/* Returns true if the rmap has only one entry. O(1) complexity. */
-static inline int rmap_has_one_entry(struct page_info *page)
+static bool rmap_has_one_entry(const struct page_info *page)
{
- return (rmap_count(page) == 1);
+ return rmap_count(page) == 1;
}
/* Returns true if the rmap has any entries. O(1) complexity. */
-static inline int rmap_has_entries(struct page_info *page)
+static bool rmap_has_entries(const struct page_info *page)
{
- return (rmap_count(page) != 0);
+ return rmap_count(page) != 0;
}
-/* The iterator hides the details of how the rmap is implemented. This
- * involves splitting the list_for_each_safe macro into two steps. */
+/*
+ * The iterator hides the details of how the rmap is implemented. This
+ * involves splitting the list_for_each_safe macro into two steps.
+ */
struct rmap_iterator {
struct list_head *curr;
struct list_head *next;
unsigned int bucket;
};
-static inline void
-rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri)
+static void rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri)
{
- ri->curr = (RMAP_USES_HASHTAB(page)) ?
- page->sharing->hash_table.bucket :
- &page->sharing->gfns;
- ri->next = ri->curr->next;
+ ri->curr = (RMAP_USES_HASHTAB(page)
+ ? page->sharing->hash_table.bucket
+ : &page->sharing->gfns);
+ ri->next = ri->curr->next;
ri->bucket = 0;
}
-static inline gfn_info_t *
-rmap_iterate(struct page_info *page, struct rmap_iterator *ri)
+static gfn_info_t *rmap_iterate(struct page_info *page,
+ struct rmap_iterator *ri)
{
- struct list_head *head = (RMAP_USES_HASHTAB(page)) ?
- page->sharing->hash_table.bucket + ri->bucket :
- &page->sharing->gfns;
+ struct list_head *head = (RMAP_USES_HASHTAB(page)
+ ? page->sharing->hash_table.bucket + ri->bucket
+ : &page->sharing->gfns);
-retry:
+ retry:
if ( ri->next == head)
{
if ( RMAP_USES_HASHTAB(page) )
ri->curr = head;
ri->next = ri->curr->next;
goto retry;
- } else
+ }
+ else
/* List exhausted */
return NULL;
}
return list_entry(ri->curr, gfn_info_t, list);
}
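A short usage sketch of the two-step iterator; this is the same seed-then-iterate pattern share_pages() uses further down, with illustrative local names:

/* Illustrative only: visit every <domain, gfn> entry backed by 'page'. */
struct rmap_iterator ri;
gfn_info_t *g;

rmap_seed_iterator(page, &ri);
while ( (g = rmap_iterate(page, &ri)) != NULL )
{
    /* Inspect or move 'g', e.g. rmap_del(g, page, 0) as share_pages() does. */
}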
-static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
- struct domain *d,
- unsigned long gfn)
+static gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
+ struct domain *d, unsigned long gfn)
{
gfn_info_t *gfn_info = xmalloc(gfn_info_t);
if ( gfn_info == NULL )
- return NULL;
+ return NULL;
gfn_info->gfn = gfn;
gfn_info->domain = d->domain_id;
return gfn_info;
}
-static inline void mem_sharing_gfn_destroy(struct page_info *page,
- struct domain *d,
- gfn_info_t *gfn_info)
+static void mem_sharing_gfn_destroy(struct page_info *page, struct domain *d,
+ gfn_info_t *gfn_info)
{
/* Decrement the number of pages. */
atomic_dec(&d->shr_pages);
xfree(gfn_info);
}
-static struct page_info* mem_sharing_lookup(unsigned long mfn)
+static struct page_info *mem_sharing_lookup(unsigned long mfn)
{
- if ( mfn_valid(_mfn(mfn)) )
- {
- struct page_info* page = mfn_to_page(_mfn(mfn));
- if ( page_get_owner(page) == dom_cow )
- {
- /* Count has to be at least two, because we're called
- * with the mfn locked (1) and this is supposed to be
- * a shared page (1). */
- unsigned long t = read_atomic(&page->u.inuse.type_info);
- ASSERT((t & PGT_type_mask) == PGT_shared_page);
- ASSERT((t & PGT_count_mask) >= 2);
- ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
- return page;
- }
- }
+ struct page_info *page;
+ unsigned long t;
- return NULL;
+ if ( !mfn_valid(_mfn(mfn)) )
+ return NULL;
+
+ page = mfn_to_page(_mfn(mfn));
+ if ( page_get_owner(page) != dom_cow )
+ return NULL;
+
+ /*
+ * Count has to be at least two, because we're called
+ * with the mfn locked (1) and this is supposed to be
+ * a shared page (1).
+ */
+ t = read_atomic(&page->u.inuse.type_info);
+ ASSERT((t & PGT_type_mask) == PGT_shared_page);
+ ASSERT((t & PGT_count_mask) >= 2);
+ ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
+
+ return page;
}
static int audit(void)
rcu_read_lock(&shr_audit_read_lock);
- list_for_each_rcu(ae, &shr_audit_list)
+ list_for_each_rcu ( ae, &shr_audit_list )
{
struct page_sharing_info *pg_shared_info;
unsigned long nr_gfns = 0;
/* If we can't lock it, it's definitely not a shared page */
if ( !mem_sharing_page_lock(pg) )
{
- MEM_SHARING_DEBUG("mfn %lx in audit list, but cannot be locked (%lx)!\n",
- mfn_x(mfn), pg->u.inuse.type_info);
- errors++;
- continue;
+ MEM_SHARING_DEBUG(
+ "mfn %lx in audit list, but cannot be locked (%lx)!\n",
+ mfn_x(mfn), pg->u.inuse.type_info);
+ errors++;
+ continue;
}
- /* Check if the MFN has correct type, owner and handle. */
+ /* Check if the MFN has correct type, owner and handle. */
if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_shared_page )
{
- MEM_SHARING_DEBUG("mfn %lx in audit list, but not PGT_shared_page (%lx)!\n",
- mfn_x(mfn), pg->u.inuse.type_info & PGT_type_mask);
- errors++;
- continue;
+ MEM_SHARING_DEBUG(
+ "mfn %lx in audit list, but not PGT_shared_page (%lx)!\n",
+ mfn_x(mfn), pg->u.inuse.type_info & PGT_type_mask);
+ errors++;
+ continue;
}
/* Check the page owner. */
if ( page_get_owner(pg) != dom_cow )
{
- MEM_SHARING_DEBUG("mfn %lx shared, but wrong owner (%hu)!\n",
- mfn_x(mfn), page_get_owner(pg)->domain_id);
- errors++;
+ MEM_SHARING_DEBUG("mfn %lx shared, but wrong owner %pd!\n",
+ mfn_x(mfn), page_get_owner(pg));
+ errors++;
}
/* Check the m2p entry */
if ( !SHARED_M2P(get_gpfn_from_mfn(mfn_x(mfn))) )
{
- MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n",
- mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn)));
- errors++;
+ MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n",
+ mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn)));
+ errors++;
}
/* Check we have a list */
if ( (!pg->sharing) || !rmap_has_entries(pg) )
{
- MEM_SHARING_DEBUG("mfn %lx shared, but empty gfn list!\n",
- mfn_x(mfn));
- errors++;
- continue;
+ MEM_SHARING_DEBUG("mfn %lx shared, but empty gfn list!\n",
+ mfn_x(mfn));
+ errors++;
+ continue;
}
/* We've found a page that is shared */
errors++;
continue;
}
- o_mfn = get_gfn_query_unlocked(d, g->gfn, &t);
+ o_mfn = get_gfn_query_unlocked(d, g->gfn, &t);
if ( !mfn_eq(o_mfn, mfn) )
{
MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx."
{
MEM_SHARING_DEBUG("Mismatched counts for MFN=%lx."
"nr_gfns in list %lu, in type_info %lx\n",
- mfn_x(mfn), nr_gfns,
+ mfn_x(mfn), nr_gfns,
(pg->u.inuse.type_info & PGT_count_mask));
errors++;
}
.reason = VM_EVENT_REASON_MEM_SHARING,
.vcpu_id = v->vcpu_id,
.u.mem_sharing.gfn = gfn,
- .u.mem_sharing.p2mt = p2m_ram_shared
+ .u.mem_sharing.p2mt = p2m_ram_shared,
};
- if ( (rc = __vm_event_claim_slot(d,
- d->vm_event_share, allow_sleep)) < 0 )
+ if ( (rc = __vm_event_claim_slot(
+ d, d->vm_event_share, allow_sleep)) < 0 )
return rc;
if ( v->domain == d )
unsigned int mem_sharing_get_nr_saved_mfns(void)
{
- return ((unsigned int)atomic_read(&nr_saved_mfns));
+ return atomic_read(&nr_saved_mfns);
}
unsigned int mem_sharing_get_nr_shared_mfns(void)
{
- return (unsigned int)atomic_read(&nr_shared_mfns);
+ return atomic_read(&nr_shared_mfns);
}
/* Functions that change a page's type and ownership */
-static int page_make_sharable(struct domain *d,
- struct page_info *page,
- int expected_refcnt)
+static int page_make_sharable(struct domain *d,
+ struct page_info *page,
+ int expected_refcnt)
{
bool_t drop_dom_ref;
return -EEXIST;
}
- /* Check if the ref count is 2. The first from PGC_allocated, and
- * the second from get_page_and_type at the top of this function */
+ /*
+ * Check if the ref count is 2. The first from PGC_allocated, and
+ * the second from get_page_and_type at the top of this function.
+ */
if ( page->count_info != (PGC_allocated | (2 + expected_refcnt)) )
{
spin_unlock(&d->page_alloc_lock);
if ( !get_page(page, dom_cow) )
return -EINVAL;
-
+
spin_lock(&d->page_alloc_lock);
if ( d->is_dying )
return 0;
}
-static inline struct page_info *__grab_shared_page(mfn_t mfn)
+static struct page_info *__grab_shared_page(mfn_t mfn)
{
struct page_info *pg = NULL;
if ( !mfn_valid(mfn) )
return NULL;
+
pg = mfn_to_page(mfn);
- /* If the page is not validated we can't lock it, and if it's
- * not validated it's obviously not shared. */
+ /*
+ * If the page is not validated we can't lock it, and if it's
+ * not validated it's obviously not shared.
+ */
if ( !mem_sharing_page_lock(pg) )
return NULL;
return -EINVAL;
}
- MEM_SHARING_DEBUG(
- "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n",
- mfn_x(page_to_mfn(page)),
- page->count_info,
- page->u.inuse.type_info,
- page_get_owner(page)->domain_id);
+ MEM_SHARING_DEBUG(
+ "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner=%pd\n",
+ mfn_x(page_to_mfn(page)), page->count_info,
+ page->u.inuse.type_info, page_get_owner(page));
/* -1 because the page is locked and that's an additional type ref */
num_refs = ((int) (page->u.inuse.type_info & PGT_count_mask)) - 1;
mfn = get_gfn_query(d, gfn_x(gfn), &p2mt);
- MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n",
+ MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n",
d->domain_id, gfn_x(gfn));
num_refs = debug_mfn(mfn);
put_gfn(d, gfn_x(gfn));
d->domain_id, ref, rc);
return rc;
}
-
- MEM_SHARING_DEBUG(
- "==> Grant [dom=%d,ref=%d], status=%x. ",
- d->domain_id, ref, status);
+
+ MEM_SHARING_DEBUG("==> Grant [dom=%d,ref=%d], status=%x. ",
+ d->domain_id, ref, status);
return debug_gfn(d, gfn);
}
goto out;
/* Return the handle if the page is already shared */
- if ( p2m_is_shared(p2mt) ) {
+ if ( p2m_is_shared(p2mt) )
+ {
struct page_info *pg = __grab_shared_page(mfn);
if ( !pg )
- {
- gprintk(XENLOG_ERR,
- "Shared p2m entry gfn %" PRI_gfn ", but could not grab mfn %" PRI_mfn " dom%d\n",
- gfn_x(gfn), mfn_x(mfn), d->domain_id);
BUG();
- }
+
*phandle = pg->sharing->handle;
ret = 0;
mem_sharing_page_unlock(pg);
if ( !p2m_is_sharable(p2mt) )
goto out;
-#ifdef CONFIG_HVM
/* Check if there are mem_access/remapped altp2m entries for this page */
if ( altp2m_active(d) )
{
altp2m_list_unlock(d);
}
-#endif
/* Try to convert the mfn to the sharable type */
page = mfn_to_page(mfn);
- ret = page_make_sharable(d, page, expected_refcnt);
- if ( ret )
+ ret = page_make_sharable(d, page, expected_refcnt);
+ if ( ret )
goto out;
- /* Now that the page is validated, we can lock it. There is no
- * race because we're holding the p2m entry, so no one else
- * could be nominating this gfn */
+ /*
+ * Now that the page is validated, we can lock it. There is no
+ * race because we're holding the p2m entry, so no one else
+ * could be nominating this gfn.
+ */
ret = -ENOENT;
if ( !mem_sharing_page_lock(page) )
goto out;
/* Initialize the shared state */
ret = -ENOMEM;
- if ( (page->sharing =
- xmalloc(struct page_sharing_info)) == NULL )
+ if ( !(page->sharing = xmalloc(struct page_sharing_info)) )
{
/* Making a page private atomically unlocks it */
- BUG_ON(page_make_private(d, page) != 0);
+ BUG_ON(page_make_private(d, page));
goto out;
}
page->sharing->pg = page;
rmap_init(page);
/* Create the handle */
- page->sharing->handle = get_next_handle();
+ page->sharing->handle = get_next_handle();
/* Create the local gfn info */
- if ( mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) == NULL )
+ if ( !mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) )
{
xfree(page->sharing);
page->sharing = NULL;
- BUG_ON(page_make_private(d, page) != 0);
+ BUG_ON(page_make_private(d, page));
goto out;
}
get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
- /* This tricky business is to avoid two callers deadlocking if
- * grabbing pages in opposite client/source order */
+ /*
+ * This tricky business is to avoid two callers deadlocking if
+ * grabbing pages in opposite client/source order.
+ */
if ( mfn_eq(smfn, cmfn) )
{
- /* The pages are already the same. We could return some
+ /*
+ * The pages are already the same. We could return some
* kind of error here, but no matter how you look at it,
* the pages are already 'shared'. It possibly represents
* a big problem somewhere else, but as far as sharing is
- * concerned: great success! */
+ * concerned: great success!
+ */
ret = 0;
goto err_out;
}
- else if ( mfn_x(smfn) < mfn_x(cmfn) )
+
+ if ( mfn_x(smfn) < mfn_x(cmfn) )
{
ret = XENMEM_SHARING_OP_S_HANDLE_INVALID;
spage = firstpg = __grab_shared_page(smfn);
mem_sharing_page_unlock(spage);
goto err_out;
}
- } else {
+ }
+ else
+ {
ret = XENMEM_SHARING_OP_C_HANDLE_INVALID;
cpage = firstpg = __grab_shared_page(cmfn);
if ( cpage == NULL )
mem_sharing_page_unlock(firstpg);
goto err_out;
}
+
if ( cpage->sharing->handle != ch )
{
ret = XENMEM_SHARING_OP_C_HANDLE_INVALID;
rmap_seed_iterator(cpage, &ri);
while ( (gfn = rmap_iterate(cpage, &ri)) != NULL)
{
- /* Get the source page and type, this should never fail:
- * we are under shr lock, and got a successful lookup */
+ /*
+         * Get the source page and type. This should never fail:
+ * we are under shr lock, and got a successful lookup.
+ */
BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
- /* Move the gfn_info from client list to source list.
- * Don't change the type of rmap for the client page. */
+ /*
+ * Move the gfn_info from client list to source list.
+ * Don't change the type of rmap for the client page.
+ */
rmap_del(gfn, cpage, 0);
rmap_add(gfn, spage);
put_count++;
atomic_dec(&nr_shared_mfns);
atomic_inc(&nr_saved_mfns);
ret = 0;
-
+
err_out:
put_two_gfns(&tg);
return ret;
}
int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh,
- struct domain *cd, unsigned long cgfn)
+ struct domain *cd, unsigned long cgfn)
{
struct page_info *spage;
int ret = -EINVAL;
spage = __grab_shared_page(smfn);
if ( spage == NULL )
goto err_out;
+
ASSERT(smfn_type == p2m_ram_shared);
/* Check that the handles match */
if ( spage->sharing->handle != sh )
goto err_unlock;
- /* Make sure the target page is a hole in the physmap. These are typically
+ /*
+ * Make sure the target page is a hole in the physmap. These are typically
* p2m_mmio_dm, but also accept p2m_invalid and paged out pages. See the
- * definition of p2m_is_hole in p2m.h. */
+ * definition of p2m_is_hole in p2m.h.
+ */
if ( !p2m_is_hole(cmfn_type) )
{
ret = XENMEM_SHARING_OP_C_HANDLE_INVALID;
/* This is simpler than regular sharing */
BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
- if ( (gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) == NULL )
+ if ( !(gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) )
{
put_page_and_type(spage);
ret = -ENOMEM;
{
mem_sharing_gfn_destroy(spage, cd, gfn_info);
put_page_and_type(spage);
- } else {
- /* There is a chance we're plugging a hole where a paged out page was */
+ }
+ else
+ {
+ /*
+ * There is a chance we're plugging a hole where a paged out
+ * page was.
+ */
if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) )
{
atomic_dec(&cd->paged_pages);
- /* Further, there is a chance this was a valid page. Don't leak it. */
+ /*
+ * Further, there is a chance this was a valid page.
+ * Don't leak it.
+ */
if ( mfn_valid(cmfn) )
{
struct page_info *cpage = mfn_to_page(cmfn);
}
-/* A note on the rationale for unshare error handling:
+/*
+ * A note on the rationale for unshare error handling:
* 1. Unshare can only fail with ENOMEM. Any other error conditions BUG_ON()'s
* 2. We notify a potential dom0 helper through a vm_event ring. But we
- * allow the notification to not go to sleep. If the event ring is full
+ * allow the notification to not go to sleep. If the event ring is full
* of ENOMEM warnings, then it's on the ball.
* 3. We cannot go to sleep until the unshare is resolved, because we might
- * be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy)
+ * be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy)
* 4. So, we make sure we:
* 4.1. return an error
* 4.2. do not corrupt shared memory
* 4.4. let the guest deal with it if the error propagation will reach it
*/
int __mem_sharing_unshare_page(struct domain *d,
- unsigned long gfn,
- uint16_t flags)
+ unsigned long gfn,
+ uint16_t flags)
{
p2m_type_t p2mt;
mfn_t mfn;
struct page_info *page, *old_page;
int last_gfn;
gfn_info_t *gfn_info = NULL;
-
+
mfn = get_gfn(d, gfn, &p2mt);
-
+
/* Has someone already unshared it? */
- if ( !p2m_is_shared(p2mt) ) {
+ if ( !p2m_is_shared(p2mt) )
+ {
put_gfn(d, gfn);
return 0;
}
page = __grab_shared_page(mfn);
if ( page == NULL )
{
- gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: "
- "%lx\n", gfn);
+ gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: %lx\n",
+ gfn);
BUG();
}
gfn_info = rmap_retrieve(d->domain_id, gfn, page);
if ( unlikely(gfn_info == NULL) )
{
- gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: "
- "%lx\n", gfn);
+ gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: %lx\n",
+ gfn);
BUG();
}
- /* Do the accounting first. If anything fails below, we have bigger
- * bigger fish to fry. First, remove the gfn from the list. */
+ /*
+     * Do the accounting first. If anything fails below, we have bigger
+     * fish to fry. First, remove the gfn from the list.
+ */
last_gfn = rmap_has_one_entry(page);
if ( last_gfn )
{
- /* Clean up shared state. Get rid of the <domid, gfn> tuple
- * before destroying the rmap. */
+ /*
+ * Clean up shared state. Get rid of the <domid, gfn> tuple
+ * before destroying the rmap.
+ */
mem_sharing_gfn_destroy(page, d, gfn_info);
page_sharing_dispose(page);
page->sharing = NULL;
else
atomic_dec(&nr_saved_mfns);
- /* If the GFN is getting destroyed drop the references to MFN
- * (possibly freeing the page), and exit early */
+ /*
+     * If the GFN is getting destroyed, drop the references to the MFN
+ * (possibly freeing the page), and exit early.
+ */
if ( flags & MEM_SHARING_DESTROY_GFN )
{
if ( !last_gfn )
return 0;
}
-
+
if ( last_gfn )
{
/* Making a page private atomically unlocks it */
old_page = page;
page = alloc_domheap_page(d, 0);
- if ( !page )
+ if ( !page )
{
/* Undo dec of nr_saved_mfns, as the retry will decrease again. */
atomic_inc(&nr_saved_mfns);
mem_sharing_page_unlock(old_page);
put_gfn(d, gfn);
- /* Caller is responsible for placing an event
- * in the ring */
+ /*
+ * Caller is responsible for placing an event
+ * in the ring.
+ */
return -ENOMEM;
}
mem_sharing_page_unlock(old_page);
put_page_and_type(old_page);
-private_page_found:
+ private_page_found:
if ( p2m_change_type_one(d, gfn, p2m_ram_shared, p2m_ram_rw) )
{
- gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n",
- d->domain_id, gfn);
+ gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n",
+ d->domain_id, gfn);
BUG();
}
/* Update m2p entry */
set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), gfn);
- /* Now that the gfn<->mfn map is properly established,
- * marking dirty is feasible */
+ /*
+ * Now that the gfn<->mfn map is properly established,
+     * marking dirty is feasible.
+ */
paging_mark_dirty(d, page_to_mfn(page));
/* We do not need to unlock a private page */
put_gfn(d, gfn);
mfn_t mfn;
int set_rc;
- if ( atomic_read(&d->shr_pages) == 0 )
+ if ( !atomic_read(&d->shr_pages) )
break;
+
mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL);
- if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
+ if ( mfn_valid(mfn) && t == p2m_ram_shared )
{
/* Does not fail with ENOMEM given the DESTROY flag */
- BUG_ON(__mem_sharing_unshare_page(d, gfn,
- MEM_SHARING_DESTROY_GFN));
- /* Clear out the p2m entry so no one else may try to
+ BUG_ON(__mem_sharing_unshare_page(
+ d, gfn, MEM_SHARING_DESTROY_GFN));
+ /*
+ * Clear out the p2m entry so no one else may try to
* unshare. Must succeed: we just read the old entry and
- * we hold the p2m lock. */
+ * we hold the p2m lock.
+ */
set_rc = p2m->set_entry(p2m, _gfn(gfn), _mfn(0), PAGE_ORDER_4K,
p2m_invalid, p2m_access_rwx, -1);
- ASSERT(set_rc == 0);
+ ASSERT(!set_rc);
count += 0x10;
}
else
switch ( mso.op )
{
- case XENMEM_sharing_op_nominate_gfn:
- {
- shr_handle_t handle;
+ case XENMEM_sharing_op_nominate_gfn:
+ {
+ shr_handle_t handle;
- rc = -EINVAL;
- if ( !mem_sharing_enabled(d) )
- goto out;
+ rc = -EINVAL;
+ if ( !mem_sharing_enabled(d) )
+ goto out;
- rc = nominate_page(d, _gfn(mso.u.nominate.u.gfn), 0, &handle);
- mso.u.nominate.handle = handle;
- }
- break;
+ rc = nominate_page(d, _gfn(mso.u.nominate.u.gfn), 0, &handle);
+ mso.u.nominate.handle = handle;
+ }
+ break;
- case XENMEM_sharing_op_nominate_gref:
- {
- grant_ref_t gref = mso.u.nominate.u.grant_ref;
- gfn_t gfn;
- shr_handle_t handle;
+ case XENMEM_sharing_op_nominate_gref:
+ {
+ grant_ref_t gref = mso.u.nominate.u.grant_ref;
+ gfn_t gfn;
+ shr_handle_t handle;
+
+ rc = -EINVAL;
+ if ( !mem_sharing_enabled(d) )
+ goto out;
+ rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &gfn, NULL);
+ if ( rc < 0 )
+ goto out;
+
+ rc = nominate_page(d, gfn, 3, &handle);
+ mso.u.nominate.handle = handle;
+ }
+ break;
- rc = -EINVAL;
- if ( !mem_sharing_enabled(d) )
- goto out;
- rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &gfn, NULL);
- if ( rc < 0 )
- goto out;
+ case XENMEM_sharing_op_share:
+ {
+ gfn_t sgfn, cgfn;
+ struct domain *cd;
+ shr_handle_t sh, ch;
- rc = nominate_page(d, gfn, 3, &handle);
- mso.u.nominate.handle = handle;
- }
- break;
+ rc = -EINVAL;
+ if ( !mem_sharing_enabled(d) )
+ goto out;
- case XENMEM_sharing_op_share:
+ rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
+ &cd);
+ if ( rc )
+ goto out;
+
+ rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
+ if ( rc )
{
- gfn_t sgfn, cgfn;
- struct domain *cd;
- shr_handle_t sh, ch;
+ rcu_unlock_domain(cd);
+ goto out;
+ }
+ if ( !mem_sharing_enabled(cd) )
+ {
+ rcu_unlock_domain(cd);
rc = -EINVAL;
- if ( !mem_sharing_enabled(d) )
- goto out;
+ goto out;
+ }
- rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
- &cd);
- if ( rc )
- goto out;
+ if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
+ {
+ grant_ref_t gref =
+ XENMEM_SHARING_OP_FIELD_GET_GREF(mso.u.share.source_gfn);
- rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
- if ( rc )
+ rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn,
+ NULL);
+ if ( rc < 0 )
{
rcu_unlock_domain(cd);
goto out;
}
+ }
+ else
+ sgfn = _gfn(mso.u.share.source_gfn);
- if ( !mem_sharing_enabled(cd) )
+ if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) )
+ {
+ grant_ref_t gref =
+ XENMEM_SHARING_OP_FIELD_GET_GREF(mso.u.share.client_gfn);
+
+ rc = mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn,
+ NULL);
+ if ( rc < 0 )
{
rcu_unlock_domain(cd);
- rc = -EINVAL;
goto out;
}
+ }
+ else
+ cgfn = _gfn(mso.u.share.client_gfn);
- if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
- {
- grant_ref_t gref = (grant_ref_t)
- (XENMEM_SHARING_OP_FIELD_GET_GREF(
- mso.u.share.source_gfn));
- rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn,
- NULL);
- if ( rc < 0 )
- {
- rcu_unlock_domain(cd);
- goto out;
- }
- }
- else
- sgfn = _gfn(mso.u.share.source_gfn);
+ sh = mso.u.share.source_handle;
+ ch = mso.u.share.client_handle;
- if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) )
- {
- grant_ref_t gref = (grant_ref_t)
- (XENMEM_SHARING_OP_FIELD_GET_GREF(
- mso.u.share.client_gfn));
- rc = mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn,
- NULL);
- if ( rc < 0 )
- {
- rcu_unlock_domain(cd);
- goto out;
- }
- }
- else
- cgfn = _gfn(mso.u.share.client_gfn);
+ rc = share_pages(d, sgfn, sh, cd, cgfn, ch);
- sh = mso.u.share.source_handle;
- ch = mso.u.share.client_handle;
+ rcu_unlock_domain(cd);
+ }
+ break;
- rc = share_pages(d, sgfn, sh, cd, cgfn, ch);
+ case XENMEM_sharing_op_add_physmap:
+ {
+ unsigned long sgfn, cgfn;
+ struct domain *cd;
+ shr_handle_t sh;
+
+ rc = -EINVAL;
+ if ( !mem_sharing_enabled(d) )
+ goto out;
+ rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
+ &cd);
+ if ( rc )
+ goto out;
+
+ rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
+ if ( rc )
+ {
rcu_unlock_domain(cd);
+ goto out;
}
- break;
- case XENMEM_sharing_op_add_physmap:
+ if ( !mem_sharing_enabled(cd) )
{
- unsigned long sgfn, cgfn;
- struct domain *cd;
- shr_handle_t sh;
+ rcu_unlock_domain(cd);
+ rc = -EINVAL;
+ goto out;
+ }
+ if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
+ {
+ /* Cannot add a gref to the physmap */
+ rcu_unlock_domain(cd);
rc = -EINVAL;
- if ( !mem_sharing_enabled(d) )
- goto out;
+ goto out;
+ }
- rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
- &cd);
- if ( rc )
- goto out;
+ sgfn = mso.u.share.source_gfn;
+ sh = mso.u.share.source_handle;
+ cgfn = mso.u.share.client_gfn;
- rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
- if ( rc )
- {
- rcu_unlock_domain(cd);
- goto out;
- }
+ rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn);
- if ( !mem_sharing_enabled(cd) )
- {
- rcu_unlock_domain(cd);
- rc = -EINVAL;
- goto out;
- }
+ rcu_unlock_domain(cd);
+ }
+ break;
- if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
- {
- /* Cannot add a gref to the physmap */
- rcu_unlock_domain(cd);
- rc = -EINVAL;
- goto out;
- }
+ case XENMEM_sharing_op_range_share:
+ {
+ unsigned long max_sgfn, max_cgfn;
+ struct domain *cd;
+
+ rc = -EINVAL;
+ if ( mso.u.range._pad[0] || mso.u.range._pad[1] ||
+ mso.u.range._pad[2] )
+ goto out;
- sgfn = mso.u.share.source_gfn;
- sh = mso.u.share.source_handle;
- cgfn = mso.u.share.client_gfn;
+ /*
+ * We use opaque for the hypercall continuation value.
+ * Ideally the user sets this to 0 in the beginning but
+ * there is no good way of enforcing that here, so we just check
+ * that it's at least in range.
+ */
+ if ( mso.u.range.opaque &&
+ (mso.u.range.opaque < mso.u.range.first_gfn ||
+ mso.u.range.opaque > mso.u.range.last_gfn) )
+ goto out;
- rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn);
+ if ( !mem_sharing_enabled(d) )
+ goto out;
+ rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain,
+ &cd);
+ if ( rc )
+ goto out;
+
+ /*
+ * We reuse XENMEM_sharing_op_share XSM check here as this is
+ * essentially the same concept repeated over multiple pages.
+ */
+ rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd,
+ XENMEM_sharing_op_share);
+ if ( rc )
+ {
rcu_unlock_domain(cd);
+ goto out;
}
- break;
- case XENMEM_sharing_op_range_share:
+ if ( !mem_sharing_enabled(cd) )
{
- unsigned long max_sgfn, max_cgfn;
- struct domain *cd;
-
+ rcu_unlock_domain(cd);
rc = -EINVAL;
- if ( mso.u.range._pad[0] || mso.u.range._pad[1] ||
- mso.u.range._pad[2] )
- goto out;
-
- /*
- * We use opaque for the hypercall continuation value.
- * Ideally the user sets this to 0 in the beginning but
- * there is no good way of enforcing that here, so we just check
- * that it's at least in range.
- */
- if ( mso.u.range.opaque &&
- (mso.u.range.opaque < mso.u.range.first_gfn ||
- mso.u.range.opaque > mso.u.range.last_gfn) )
- goto out;
-
- if ( !mem_sharing_enabled(d) )
- goto out;
-
- rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain,
- &cd);
- if ( rc )
- goto out;
-
- /*
- * We reuse XENMEM_sharing_op_share XSM check here as this is
- * essentially the same concept repeated over multiple pages.
- */
- rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd,
- XENMEM_sharing_op_share);
- if ( rc )
- {
- rcu_unlock_domain(cd);
- goto out;
- }
-
- if ( !mem_sharing_enabled(cd) )
- {
- rcu_unlock_domain(cd);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Sanity check only, the client should keep the domains paused for
- * the duration of this op.
- */
- if ( !atomic_read(&d->pause_count) ||
- !atomic_read(&cd->pause_count) )
- {
- rcu_unlock_domain(cd);
- rc = -EINVAL;
- goto out;
- }
+ goto out;
+ }
- max_sgfn = domain_get_maximum_gpfn(d);
- max_cgfn = domain_get_maximum_gpfn(cd);
+ /*
+ * Sanity check only, the client should keep the domains paused for
+ * the duration of this op.
+ */
+ if ( !atomic_read(&d->pause_count) ||
+ !atomic_read(&cd->pause_count) )
+ {
+ rcu_unlock_domain(cd);
+ rc = -EINVAL;
+ goto out;
+ }
- if ( max_sgfn < mso.u.range.first_gfn ||
- max_sgfn < mso.u.range.last_gfn ||
- max_cgfn < mso.u.range.first_gfn ||
- max_cgfn < mso.u.range.last_gfn )
- {
- rcu_unlock_domain(cd);
- rc = -EINVAL;
- goto out;
- }
+ max_sgfn = domain_get_maximum_gpfn(d);
+ max_cgfn = domain_get_maximum_gpfn(cd);
- rc = range_share(d, cd, &mso.u.range);
+ if ( max_sgfn < mso.u.range.first_gfn ||
+ max_sgfn < mso.u.range.last_gfn ||
+ max_cgfn < mso.u.range.first_gfn ||
+ max_cgfn < mso.u.range.last_gfn )
+ {
rcu_unlock_domain(cd);
+ rc = -EINVAL;
+ goto out;
+ }
- if ( rc > 0 )
- {
- if ( __copy_to_guest(arg, &mso, 1) )
- rc = -EFAULT;
- else
- rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
- "lh", XENMEM_sharing_op,
- arg);
- }
+ rc = range_share(d, cd, &mso.u.range);
+ rcu_unlock_domain(cd);
+
+ if ( rc > 0 )
+ {
+ if ( __copy_to_guest(arg, &mso, 1) )
+ rc = -EFAULT;
else
- mso.u.range.opaque = 0;
+ rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
+ "lh", XENMEM_sharing_op,
+ arg);
}
- break;
+ else
+ mso.u.range.opaque = 0;
+ }
+ break;
- case XENMEM_sharing_op_debug_gfn:
- rc = debug_gfn(d, _gfn(mso.u.debug.u.gfn));
- break;
+ case XENMEM_sharing_op_debug_gfn:
+ rc = debug_gfn(d, _gfn(mso.u.debug.u.gfn));
+ break;
- case XENMEM_sharing_op_debug_gref:
- rc = debug_gref(d, mso.u.debug.u.gref);
- break;
+ case XENMEM_sharing_op_debug_gref:
+ rc = debug_gref(d, mso.u.debug.u.gref);
+ break;
- default:
- rc = -ENOSYS;
- break;
+ default:
+ rc = -ENOSYS;
+ break;
}
if ( !rc && __copy_to_guest(arg, &mso, 1) )
/* Only HAP is supported */
if ( !hap_enabled(d) )
- return -ENODEV;
+ return -ENODEV;
- switch(mec->op)
+ switch ( mec->op )
{
- case XEN_DOMCTL_MEM_SHARING_CONTROL:
- {
- rc = 0;
- if ( unlikely(is_iommu_enabled(d) && mec->u.enable) )
- rc = -EXDEV;
- else
- d->arch.hvm.mem_sharing_enabled = mec->u.enable;
- }
+ case XEN_DOMCTL_MEM_SHARING_CONTROL:
+ rc = 0;
+ if ( unlikely(is_iommu_enabled(d) && mec->u.enable) )
+ rc = -EXDEV;
+ else
+ d->arch.hvm.mem_sharing_enabled = mec->u.enable;
break;
- default:
- rc = -ENOSYS;
+ default:
+ rc = -ENOSYS;
+ break;
}
return rc;