batched interface that preserves the access/dirty PTE flags.
Signed-off-by: Bruce Rogers <brogers@novell.com>
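
The new MMU_PT_UPDATE_PRESERVE_AD operation makes each PTE write atomic with
respect to hardware A/D updates: rather than a plain store, Xen ORs the
accessed/dirty bits currently in the PTE into the new value, so bits set by
the MMU between reading and rewriting the entry are not lost. Per entry, the
semantics amount to the following sketch (PTE_A/PTE_D are illustrative
stand-ins for the architectural accessed/dirty bits, not names from this
patch):

	/* Illustration only: the "*ptr = val | (*ptr & (A|D))" update. */
	static inline u64 preserve_ad(u64 old_pte, u64 new_val)
	{
		const u64 PTE_A = 1ULL << 5;	/* x86 _PAGE_ACCESSED (assumed) */
		const u64 PTE_D = 1ULL << 6;	/* x86 _PAGE_DIRTY (assumed) */

		return new_val | (old_pte & (PTE_A | PTE_D));
	}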
mach_lp, (u64)entry_a | ((u64)entry_b<<32));
}
#endif
+
+#define MAX_BATCHED_FULL_PTES 32
+
+int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+			 unsigned long addr, unsigned long end, pgprot_t newprot)
+{
+	int rc = 0, i = 0;
+	mmu_update_t u[MAX_BATCHED_FULL_PTES];
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	do {
+		if (pte_present(*pte)) {
+			/* Queue the update; PRESERVE_AD makes Xen OR the
+			 * current A/D bits back into the new value. */
+			u[i].ptr = virt_to_machine(pte) | MMU_PT_UPDATE_PRESERVE_AD;
+			u[i].val = __pte_val(pte_modify(*pte, newprot));
+			if (++i == MAX_BATCHED_FULL_PTES) {
+				/* Batch full: flush it in one hypercall. */
+				if ((rc = HYPERVISOR_mmu_update(
+					&u[0], i, NULL, DOMID_SELF)) != 0)
+					break;
+				i = 0;
+			}
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	/* Flush any partially filled batch. */
+	if (i)
+		rc = HYPERVISOR_mmu_update(&u[0], i, NULL, DOMID_SELF);
+	pte_unmap_unlock(pte - 1, ptl);
+	/* -ENOSYS only means the hypervisor lacks this op; anything else is fatal. */
+	BUG_ON(rc && rc != -ENOSYS);
+	return !rc;
+}
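
A note on the contract: a nonzero return tells the caller the whole range was
updated and the generic per-PTE loop can be skipped. If the hypervisor
predates MMU_PT_UPDATE_PRESERVE_AD, the hypercall fails with -ENOSYS, the
function returns 0, and mprotect falls back to change_pte_range; any other
error trips the BUG_ON.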
})
#endif
+#ifndef arch_change_pte_range
+#define arch_change_pte_range(mm, pmd, addr, end, newprot) 0
+#endif
+
#ifndef __ASSEMBLY__
/*
* When walking page tables, we usually want to skip any p?d_none entries;
unsigned long address,
unsigned long size);
+int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+			 unsigned long addr, unsigned long end, pgprot_t newprot);
+
+#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
+	xen_change_pte_range(mm, pmd, addr, end, newprot)
+
+
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
unsigned long address,
unsigned long size);
+int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+			 unsigned long addr, unsigned long end, pgprot_t newprot);
+
+#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
+	xen_change_pte_range(mm, pmd, addr, end, newprot)
+
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
* ptr[:2] -- Machine address within the frame whose mapping to modify.
* The frame must belong to the FD, if one is specified.
* val -- Value to write into the mapping entry.
+ *
+ * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
+ * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
+ * with those in @val.
*/
-#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
-#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
/*
* MMU EXTENDED OPERATIONS
#define __mk_unsigned_long(x) x ## UL
#define mk_unsigned_long(x) __mk_unsigned_long(x)
-DEFINE_XEN_GUEST_HANDLE(uint8_t);
-DEFINE_XEN_GUEST_HANDLE(uint16_t);
-DEFINE_XEN_GUEST_HANDLE(uint32_t);
-DEFINE_XEN_GUEST_HANDLE(uint64_t);
+__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
+__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
+__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
+__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
#else /* __ASSEMBLY__ */
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
+		if (arch_change_pte_range(mm, pmd, addr, next, newprot))
+			continue;
		change_pte_range(mm, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);
}
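
With the generic fallback above (arch_change_pte_range defined to 0), the
added branch is a compile-time constant on non-Xen configurations, so the
hook costs nothing there; only the Xen headers override it to call
xen_change_pte_range.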