	smp_wmb();

	guest_hv_clock->version = ++vcpu->hv_clock.version;
- mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+ kvm_gpc_mark_dirty(gpc);
read_unlock_irqrestore(&gpc->lock, flags);
	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
}
- if (user_len2)
+ if (user_len2) {
+ kvm_gpc_mark_dirty(gpc2);
read_unlock(&gpc2->lock);
+ }
+ kvm_gpc_mark_dirty(gpc1);
read_unlock_irqrestore(&gpc1->lock, flags);
-
- mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
- if (user_len2)
- mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
}
void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
: "0" (evtchn_pending_sel32));
WRITE_ONCE(vi->evtchn_upcall_pending, 1);
}
+
+ kvm_gpc_mark_dirty(gpc);
read_unlock_irqrestore(&gpc->lock, flags);
/* For the per-vCPU lapic vector, deliver it as MSI. */
if (v->arch.xen.upcall_vector)
kvm_xen_inject_vcpu_vector(v);
-
- mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
*/
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
+/**
+ * kvm_gpc_mark_dirty - mark a cached guest page as dirty.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * The caller must hold @gpc->lock so that @gpc->memslot cannot be
+ * invalidated before the page is marked dirty.
+ */
+void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc);
+
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
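For reference, the calling pattern this helper is meant to support looks roughly like the sketch below. It is illustrative only: the function name is invented, and the kvm_gpc_check()-under-read-lock flow shown is the usual pfncache usage rather than code from this patch. The point, which the hunks above all follow, is that kvm_gpc_mark_dirty() runs while gpc->lock is still held, so the gpc->memslot it dereferences cannot be invalidated between the guest-page update and the dirty marking.

static void example_write_guest_page(struct gfn_to_pfn_cache *gpc)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);

	/* Revalidate the cache; a stale mapping must not be written. */
	if (!kvm_gpc_check(gpc, PAGE_SIZE)) {
		read_unlock_irqrestore(&gpc->lock, flags);
		return;
	}

	/* ... update guest-visible data through gpc->khva ... */

	/* Mark the page dirty before the memslot can change under us. */
	kvm_gpc_mark_dirty(gpc);
	read_unlock_irqrestore(&gpc->lock, flags);
}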
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
+void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
+{
+ mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_mark_dirty);
+
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
struct kvm *kvm = gpc->kvm;
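Because the helper dereferences gpc->memslot, it is only safe while gpc->lock is held, which is exactly why the call sites above were moved inside the locked regions. One way to make that contract explicit (a sketch, not part of this patch) would be a lockdep assertion in the helper body:

void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
{
	/* The memslot is only stable while the cache lock is held. */
	lockdep_assert_held(&gpc->lock);

	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}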