mfn = mfn_x(INVALID_MFN);
}
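+ /*
+ * If this frame was previously mapped at a different GFN, drop that
+ * stale P2M entry before the new mapping is recorded below.
+ */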
+ if ( mfn != mfn_x(INVALID_MFN) &&
+ !gfn_eq(gnttab_get_frame_gfn(d, status, idx), INVALID_GFN) )
+ {
+ rc = guest_physmap_remove_page(d,
+ gnttab_get_frame_gfn(d, status, idx),
+ _mfn(mfn), 0);
+ if ( rc )
+ {
+ grant_write_unlock(d->grant_table);
+ return rc;
+ }
+ }
+
if ( mfn != mfn_x(INVALID_MFN) )
{
- if ( status )
- d->arch.grant_status_gfn[idx] = gfn;
- else
- d->arch.grant_shared_gfn[idx] = gfn;
+ gnttab_set_frame_gfn(d, status, idx, gfn);
t = p2m_ram_rw;
}
return -ENOMEM;
}
-static void
+static int
gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
{
- int i;
+ unsigned int i;
for ( i = 0; i < nr_status_frames(gt); i++ )
{
struct page_info *pg = virt_to_page(gt->status[i]);
+ gfn_t gfn = gnttab_get_frame_gfn(d, true, i);
+
+ /*
+ * For translated domains, recovering from a failure after partial
+ * changes have been made is more complicated than seems worth
+ * implementing at this time. Hence the respective error paths below
+ * simply crash the domain in such a case.
+ */
+ if ( paging_mode_translate(d) )
+ {
+ int rc = gfn_eq(gfn, INVALID_GFN)
+ ? 0
+ : guest_physmap_remove_page(d, gfn,
+ _mfn(page_to_mfn(pg)), 0);
+
+ if ( rc )
+ {
+ gprintk(XENLOG_ERR,
+ "Could not remove status frame %u (GFN %#lx) from P2M\n",
+ i, gfn_x(gfn));
+ domain_crash(d);
+ return rc;
+ }
+ gnttab_set_frame_gfn(d, true, i, INVALID_GFN);
+ }
BUG_ON(page_get_owner(pg) != d);
if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
put_page(pg);
- BUG_ON(pg->count_info & ~PGC_xen_heap);
+
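+ /*
+ * Unexpected reference counts or flags mean the frame is still in
+ * use and must not be freed.
+ */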
+ if ( pg->count_info & ~PGC_xen_heap )
+ {
+ if ( paging_mode_translate(d) )
+ {
+ gprintk(XENLOG_ERR,
+ "Wrong page state %#lx of status frame %u (GFN %#lx)\n",
+ pg->count_info, i, gfn_x(gfn));
+ domain_crash(d);
+ }
+ else
+ {
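+ /*
+ * Roll back: restore this page's allocated state and re-share the
+ * status frames processed in earlier iterations with the guest.
+ */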
+ if ( get_page(pg, d) )
+ set_bit(_PGC_allocated, &pg->count_info);
+ while ( i-- )
+ gnttab_create_status_page(d, gt, i);
+ }
+ return -EBUSY;
+ }
+
+ page_set_owner(pg, NULL);
+ }
+
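+ /* Only free the backing pages once every frame was reclaimed above. */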
+ for ( i = 0; i < nr_status_frames(gt); i++ )
+ {
free_xenheap_page(gt->status[i]);
gt->status[i] = NULL;
}
gt->nr_status_frames = 0;
+
+ return 0;
}
/*
break;
}
- if ( op.version < 2 && gt->gt_version == 2 )
- gnttab_unpopulate_status_frames(currd, gt);
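+ /*
+ * Failure to unpopulate the status frames leaves the table in v2
+ * mode; propagate the error instead of switching versions.
+ */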
+ if ( op.version < 2 && gt->gt_version == 2 &&
+ (res = gnttab_unpopulate_status_frames(currd, gt)) != 0 )
+ goto out_unlock;
/* Make sure there's no crud left over from the old version. */
for ( i = 0; i < nr_grant_frames(gt); i++ )
return 1;
}
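+/*
+ * Record / look up the guest frame number at which a shared or status
+ * frame is currently mapped.
+ */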
+#define gnttab_set_frame_gfn(d, st, idx, gfn) \
+ do { \
+ ((st) ? (d)->arch.grant_status_gfn \
+ : (d)->arch.grant_shared_gfn)[idx] = (gfn); \
+ } while ( 0 )
+
+#define gnttab_get_frame_gfn(d, st, idx) ({ \
+ _gfn((st) ? gnttab_status_gmfn(d, (d)->grant_table, idx) \
+ : gnttab_shared_gmfn(d, (d)->grant_table, idx)); \
+})
+
#define gnttab_create_shared_page(d, t, i) \
do { \
share_xen_page_with_guest( \
int replace_grant_host_mapping(
uint64_t addr, unsigned long frame, uint64_t new_addr, unsigned int flags);
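+/*
+ * x86 recovers the GFN of a frame from the M2P table, so nothing needs
+ * to be recorded when a frame gets mapped.
+ */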
+#define gnttab_set_frame_gfn(d, st, idx, gfn) do {} while ( 0 )
+#define gnttab_get_frame_gfn(d, st, idx) ({ \
+ unsigned long mfn_ = (st) ? gnttab_status_mfn((d)->grant_table, idx) \
+ : gnttab_shared_mfn((d)->grant_table, idx); \
+ unsigned long gpfn_ = get_gpfn_from_mfn(mfn_); \
+ VALID_M2P(gpfn_) ? _gfn(gpfn_) : INVALID_GFN; \
+})
+
#define gnttab_create_shared_page(d, t, i) \
do { \
share_xen_page_with_guest( \
} while ( 0 )
-#define gnttab_shared_mfn(d, t, i) \
+#define gnttab_shared_mfn(t, i) \
((virt_to_maddr((t)->shared_raw[i]) >> PAGE_SHIFT))
#define gnttab_shared_gmfn(d, t, i) \
- (mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
+ (mfn_to_gmfn(d, gnttab_shared_mfn(t, i)))
#define gnttab_status_mfn(t, i) \