#if GUEST_PAGING_LEVELS == 2
/* From one page of a multi-page shadow, find the next one */
-static inline mfn_t sh_next_page(mfn_t smfn)
+static inline mfn_t cf_check sh_next_page(mfn_t smfn)
{
struct page_info *pg = mfn_to_page(smfn), *next;
struct page_list_head h = PAGE_LIST_HEAD_INIT(h);
return (u32)((unsigned long)ptr & ~PAGE_MASK) / sizeof(guest_l1e_t);
}
-static u32
-shadow_l1_index(mfn_t *smfn, u32 guest_index)
+static u32 cf_check shadow_l1_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2)
ASSERT(mfn_to_page(*smfn)->u.sh.head);
#endif
}
-static u32
-shadow_l2_index(mfn_t *smfn, u32 guest_index)
+static u32 cf_check shadow_l2_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2)
int i;
#if GUEST_PAGING_LEVELS >= 4
-static u32
-shadow_l3_index(mfn_t *smfn, u32 guest_index)
+static u32 cf_check shadow_l3_index(mfn_t *smfn, u32 guest_index)
{
return guest_index;
}
-static u32
-shadow_l4_index(mfn_t *smfn, u32 guest_index)
+static u32 cf_check shadow_l4_index(mfn_t *smfn, u32 guest_index)
{
return guest_index;
}
/**************************************************************************/
/* Create a shadow of a given guest page.
*/
-static mfn_t
+static mfn_t cf_check
sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
{
struct domain *d = v->domain;
*/
#if GUEST_PAGING_LEVELS >= 4
-static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se)
+static int cf_check validate_gl4e(
+ struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se)
{
shadow_l4e_t new_sl4e;
guest_l4e_t new_gl4e = *(guest_l4e_t *)new_ge;
}
-static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se)
+static int cf_check validate_gl3e(
+ struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se)
{
struct domain *d = v->domain;
shadow_l3e_t new_sl3e;
}
#endif // GUEST_PAGING_LEVELS >= 4
-static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
+static int cf_check validate_gl2e(
+ struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
{
struct domain *d = v->domain;
shadow_l2e_t new_sl2e;
return result;
}
-static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
+static int cf_check validate_gl1e(
+ struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
{
struct domain *d = v->domain;
shadow_l1e_t new_sl1e;
#endif
static DEFINE_PER_CPU(guest_pa_t,trace_emulate_write_val);
-static void trace_emulate_write_val(const void *ptr, unsigned long vaddr,
- const void *src, unsigned int bytes)
+static void cf_check trace_emulate_write_val(
+ const void *ptr, unsigned long vaddr, const void *src, unsigned int bytes)
{
#if GUEST_PAGING_LEVELS == 3
if ( vaddr == this_cpu(trace_emulate_initial_va) )
* shadow code (and the guest should retry) or 0 if it is not (and the
* fault should be handled elsewhere or passed to the guest). */
-static int sh_page_fault(struct vcpu *v,
- unsigned long va,
- struct cpu_user_regs *regs)
+static int cf_check sh_page_fault(
+ struct vcpu *v, unsigned long va, struct cpu_user_regs *regs)
{
struct domain *d = v->domain;
walk_t gw;
* instruction should be issued on the hardware, or false if it's safe not
* to do so.
*/
-static bool sh_invlpg(struct vcpu *v, unsigned long linear)
+static bool cf_check sh_invlpg(struct vcpu *v, unsigned long linear)
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
#ifdef CONFIG_HVM
-static unsigned long
-sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
- unsigned long va, uint32_t *pfec)
+static unsigned long cf_check sh_gva_to_gfn(
+ struct vcpu *v, struct p2m_domain *p2m, unsigned long va, uint32_t *pfec)
/* Called to translate a guest virtual address to what the *guest*
* pagetables would map it to. */
{
* Removes v->arch.paging.shadow.shadow_table[].
* Does all appropriate management/bookkeeping/refcounting/etc...
*/
-static void
-sh_detach_old_tables(struct vcpu *v)
+static void cf_check sh_detach_old_tables(struct vcpu *v)
{
struct domain *d = v->domain;
mfn_t smfn;
}
}
-static void
-sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
+static void cf_check sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
/* Updates vcpu->arch.cr3 after the guest has changed CR3.
* Paravirtual guests should set v->arch.guest_table (and guest_table_user,
* if appropriate).
#endif /* OOS */
#if defined(CONFIG_HVM) && (SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC)
-static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn)
+static int cf_check sh_guess_wrmap(
+ struct vcpu *v, unsigned long vaddr, mfn_t gmfn)
/* Look up this vaddr in the current shadow and see if it's a writeable
* mapping of this gmfn. If so, remove it. Returns 1 if it worked. */
{
}
#endif
-int sh_rm_write_access_from_l1(struct domain *d, mfn_t sl1mfn,
- mfn_t readonly_mfn)
+int cf_check sh_rm_write_access_from_l1(
+ struct domain *d, mfn_t sl1mfn, mfn_t readonly_mfn)
/* Excises all writeable mappings to readonly_mfn from this l1 shadow table */
{
shadow_l1e_t *sl1e;
}
-int sh_rm_mappings_from_l1(struct domain *d, mfn_t sl1mfn, mfn_t target_mfn)
+int cf_check sh_rm_mappings_from_l1(
+ struct domain *d, mfn_t sl1mfn, mfn_t target_mfn)
/* Excises all mappings to guest frame from this shadow l1 table */
{
shadow_l1e_t *sl1e;
}
}
-int sh_remove_l1_shadow(struct domain *d, mfn_t sl2mfn, mfn_t sl1mfn)
+int cf_check sh_remove_l1_shadow(struct domain *d, mfn_t sl2mfn, mfn_t sl1mfn)
/* Remove all mappings of this l1 shadow from this l2 shadow */
{
shadow_l2e_t *sl2e;
}
#if GUEST_PAGING_LEVELS >= 4
-int sh_remove_l2_shadow(struct domain *d, mfn_t sl3mfn, mfn_t sl2mfn)
+int cf_check sh_remove_l2_shadow(struct domain *d, mfn_t sl3mfn, mfn_t sl2mfn)
/* Remove all mappings of this l2 shadow from this l3 shadow */
{
shadow_l3e_t *sl3e;
return done;
}
-int sh_remove_l3_shadow(struct domain *d, mfn_t sl4mfn, mfn_t sl3mfn)
+int cf_check sh_remove_l3_shadow(struct domain *d, mfn_t sl4mfn, mfn_t sl3mfn)
/* Remove all mappings of this l3 shadow from this l4 shadow */
{
shadow_l4e_t *sl4e;
* and in the meantime we unhook its top-level user-mode entries. */
#if GUEST_PAGING_LEVELS == 3
-static void sh_pagetable_dying(paddr_t gpa)
+static void cf_check sh_pagetable_dying(paddr_t gpa)
{
struct vcpu *v = current;
struct domain *d = v->domain;
put_gfn(d, l3gfn);
}
#else
-static void sh_pagetable_dying(paddr_t gpa)
+static void cf_check sh_pagetable_dying(paddr_t gpa)
{
struct vcpu *v = current;
struct domain *d = v->domain;
return NULL;
}
-int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
+int cf_check sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
{
guest_l1e_t *gl1e, *gp;
shadow_l1e_t *sl1e;
return done;
}
-int sh_audit_fl1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
+int cf_check sh_audit_fl1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
{
guest_l1e_t *gl1e, e;
shadow_l1e_t *sl1e;
return 0;
}
-int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x)
+int cf_check sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x)
{
struct domain *d = v->domain;
guest_l2e_t *gl2e, *gp;
}
#if GUEST_PAGING_LEVELS >= 4
-int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
+int cf_check sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
{
struct domain *d = v->domain;
guest_l3e_t *gl3e, *gp;
return 0;
}
-int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
+int cf_check sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
{
struct domain *d = v->domain;
guest_l4e_t *gl4e, *gp;
SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, GUEST_LEVELS)
(struct domain *d, mfn_t sl4mfn, int user_only);
-extern int
+int cf_check
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, GUEST_LEVELS)
(struct domain *d, mfn_t sl1mfn, mfn_t readonly_mfn);
-extern int
+int cf_check
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, GUEST_LEVELS)
(struct domain *d, mfn_t sl1mfn, mfn_t target_mfn);
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, GUEST_LEVELS)
(struct domain *d, void *ep, mfn_t smfn);
-extern int
+int cf_check
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, GUEST_LEVELS)
(struct domain *d, mfn_t sl2mfn, mfn_t sl1mfn);
-extern int
+int cf_check
SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, GUEST_LEVELS)
(struct domain *d, mfn_t sl3mfn, mfn_t sl2mfn);
-extern int
+int cf_check
SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, GUEST_LEVELS)
(struct domain *d, mfn_t sl4mfn, mfn_t sl3mfn);
#if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
-int
+int cf_check
SHADOW_INTERNAL_NAME(sh_audit_l1_table, GUEST_LEVELS)
(struct vcpu *v, mfn_t sl1mfn, mfn_t x);
-int
+int cf_check
SHADOW_INTERNAL_NAME(sh_audit_fl1_table, GUEST_LEVELS)
(struct vcpu *v, mfn_t sl1mfn, mfn_t x);
-int
+int cf_check
SHADOW_INTERNAL_NAME(sh_audit_l2_table, GUEST_LEVELS)
(struct vcpu *v, mfn_t sl2mfn, mfn_t x);
-int
+int cf_check
SHADOW_INTERNAL_NAME(sh_audit_l3_table, GUEST_LEVELS)
(struct vcpu *v, mfn_t sl3mfn, mfn_t x);
-int
+int cf_check
SHADOW_INTERNAL_NAME(sh_audit_l4_table, GUEST_LEVELS)
(struct vcpu *v, mfn_t sl4mfn, mfn_t x);
#endif
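
For context (not part of the patch): cf_check marks functions whose addresses are taken for indirect calls, such as the paging-mode hooks and the shadow hash-table callbacks touched above, so that a CET-IBT capable build emits an ENDBR landing pad at their entry points; an indirect branch that lands anywhere else faults. The sketch below is a minimal, self-contained illustration of that pattern under those assumptions. The names demo_paging_hooks and demo_page_fault are hypothetical and do not come from this series, and the local cf_check fallback only stands in for Xen's real definition when the compiler attribute is unavailable.

/*
 * Minimal sketch (not from the patch): why indirect-call targets need
 * an annotation under IBT.  An indirect "call *%rax" may only land on
 * an ENDBR instruction, which the compiler emits for functions that
 * carry the control-flow-check attribute.
 */
#include <stdio.h>

#ifndef cf_check
#define cf_check   /* stand-in; in IBT-enabled Xen builds this expands to a compiler attribute */
#endif

struct demo_paging_hooks {
    /* hypothetical stand-in for a per-mode hook table */
    int (*page_fault)(unsigned long va);
};

/* Address-taken callback: an indirect-call target, hence cf_check. */
static int cf_check demo_page_fault(unsigned long va)
{
    printf("shadow fault at %#lx\n", va);
    return 0;
}

static const struct demo_paging_hooks demo_hooks = {
    .page_fault = demo_page_fault,
};

int main(void)
{
    /* Indirect call: with IBT enabled, the target must begin with ENDBR. */
    return demo_hooks.page_fault(0x1000UL);
}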