#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/seqlock.h>
#include <xen/interface/xen.h>
#include <xen/gnttab.h>
#include <asm/pgtable.h>
static struct gnttab_free_callback *gnttab_free_callback_list;
+static DEFINE_SEQLOCK(gnttab_dma_lock);
+
static int gnttab_expand(unsigned int req_entries);
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
static void gnttab_page_free(struct page *page)
{
+ /*
+ * NOTE(review): the old scheme stashed the original page in
+ * page->mapping on a failed copy (see the removed -EBUSY path in
+ * gnttab_copy_grant_page) and released it here. With the
+ * gnttab_dma_lock seqlock the copy either completes or is aborted
+ * before publication, so that deferred put_page() bookkeeping is
+ * dropped — confirm against the complete patch context.
+ */
- if (page->mapping) {
- put_page((struct page *)page->mapping);
- page->mapping = NULL;
- }
-
ClearPageForeign(page);
gnttab_reset_grant_page(page);
put_page(page);
mfn = pfn_to_mfn(pfn);
new_mfn = virt_to_mfn(new_addr);
+ /*
+ * Writer side of the DMA-vs-copy race fix: take gnttab_dma_lock so
+ * that gnttab_dma_map_page() (the seqlock reader) either sees the
+ * page before the replacement starts or retries after it finishes.
+ * The page_mapped() check below aborts the copy if a DMA mapping
+ * raced in ahead of us.
+ */
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- set_phys_to_machine(pfn, new_mfn);
- set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
+ write_seqlock(&gnttab_dma_lock);
- mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- mmu.val = pfn;
- err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
- BUG_ON(err);
+ /* Make seq visible before checking page_mapped. */
+ smp_mb();
+
+ /* Has the page been DMA-mapped? */
+ if (unlikely(page_mapped(page))) {
+ write_sequnlock(&gnttab_dma_lock);
+ put_page(new_page);
+ err = -EBUSY;
+ goto out;
}
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ set_phys_to_machine(pfn, new_mfn);
+
gnttab_set_replace_op(&unmap, (unsigned long)addr,
(unsigned long)new_addr, ref);
BUG_ON(err);
BUG_ON(unmap.status);
+ /*
+ * The p2m update for pfn and the grant replace are done under the
+ * seqlock; the remaining MACHPHYS fixups do not affect what
+ * gnttab_dma_map_page() reads, so they can run after unlock.
+ */
+ write_sequnlock(&gnttab_dma_lock);
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
+
+ mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ mmu.val = pfn;
+ err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
+ BUG_ON(err);
+ }
+
new_page->mapping = page->mapping;
new_page->index = page->index;
set_bit(PG_foreign, &new_page->flags);
+ /*
+ * NOTE(review): the three lines above initialize new_page, but
+ * SetPageForeign targets the old page here — verify whether
+ * new_page was intended (cannot tell from this truncated hunk).
+ */
SetPageForeign(page, gnttab_page_free);
page->mapping = NULL;
- /*
- * Ensure that there is a barrier between setting the p2m entry
- * and checking the map count. See gnttab_dma_map_page.
- */
- smp_mb();
-
- /* Has the page been DMA-mapped? */
- if (unlikely(page_mapped(page))) {
- err = -EBUSY;
- page->mapping = (void *)new_page;
- }
-
out:
put_page(page);
return err;
-
}
EXPORT_SYMBOL(gnttab_copy_grant_page);
*/
maddr_t gnttab_dma_map_page(struct page *page)
{
+ /*
+ * Reader side of the DMA-vs-copy race fix: loop under the
+ * gnttab_dma_lock seqlock so that either we observe a stable bus
+ * address, or we retry after a concurrent gnttab_copy_grant_page()
+ * has replaced the foreign page. Raising _mapcount before the
+ * retry check tells the writer the page is DMA-mapped (its
+ * page_mapped() test), so the copy aborts with -EBUSY instead of
+ * pulling the page out from under an in-flight DMA.
+ */
- maddr_t mfn = pfn_to_mfn(page_to_pfn(page)), mfn2;
+ maddr_t maddr = page_to_bus(page);
+ unsigned int seq;
if (!PageForeign(page))
- return mfn << PAGE_SHIFT;
+ return maddr;
- if (mfn_to_local_pfn(mfn) < max_mapnr)
- return mfn << PAGE_SHIFT;
+ do {
+ seq = read_seqbegin(&gnttab_dma_lock);
+ maddr = page_to_bus(page);
- atomic_set(&page->_mapcount, 0);
+ /* Has it become a local MFN? */
+ if (pfn_valid(mfn_to_local_pfn(maddr >> PAGE_SHIFT)))
+ break;
- /* This barrier corresponds to the one in gnttab_copy_grant_page. */
- smp_mb();
+ atomic_set(&page->_mapcount, 0);
- /* Has this page been copied in the mean time? */
- mfn2 = pfn_to_mfn(page_to_pfn(page));
+ /* Make _mapcount visible before read_seqretry. */
+ smp_mb();
+ } while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
- return mfn2 << PAGE_SHIFT;
+ return maddr;
}
int gnttab_resume(void)