{
gfn_t s = _gfn(domctl->u.cacheflush.start_pfn);
gfn_t e = gfn_add(s, domctl->u.cacheflush.nr_pfns);
+ int rc;
if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
return -EINVAL;
if ( gfn_x(e) < gfn_x(s) )
return -EINVAL;
- return p2m_cache_flush_range(d, s, e);
+ /* XXX: Handle preemption */
+ do
+ rc = p2m_cache_flush_range(d, &s, e);
+ while ( rc == -ERESTART );
+
+ return rc;
}
case XEN_DOMCTL_bind_pt_irq:
{
return rc;
}
-int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
+int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
gfn_t next_block_gfn;
+ gfn_t start = *pstart;
mfn_t mfn = INVALID_MFN;
p2m_type_t t;
unsigned int order;
+ int rc = 0;
+ /* Iteration counter used to decide when to check for pending softirqs (preemption). */
+ unsigned short count = 0;
/*
* The operation cache flush will invalidate the RAM assigned to the
while ( gfn_x(start) < gfn_x(end) )
{
+ /*
+ * Cleaning the cache for the P2M may take a long time. So we
+ * need to be able to preempt. We will arbitrarily preempt every
+ * time count reaches 512 or above.
+ *
+ * The count will be incremented by:
+ * - 1 on region skipped
+ * - 10 for each page requiring a flush
+ */
+ if ( count >= 512 )
+ {
+ if ( softirq_pending(smp_processor_id()) )
+ {
+ rc = -ERESTART;
+ break;
+ }
+ count = 0;
+ }
+
/*
* We want to flush page by page as:
* - it may not be possible to map the full block (can be up to 1GB)
if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
{
+ count++;
start = next_block_gfn;
continue;
}
}
+ count += 10;
+
flush_page_to_ram(mfn_x(mfn), false);
start = gfn_add(start, 1);
mfn = mfn_add(mfn, 1);
}
- invalidate_icache();
+ if ( rc != -ERESTART )
+ invalidate_icache();
p2m_read_unlock(p2m);
- return 0;
+ *pstart = start;
+
+ return rc;
}
mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
/*
* Clean & invalidate caches corresponding to a region [start,end) of guest
* address space.
+ *
+ * start will get updated if the function is preempted.
*/
-int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end);
+int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end);
/*
* Map a region in the guest p2m with a specific p2m type.