* sh_pin() returns a boolean result, so switch its return type from int to bool.
* sh_remove_shadows() uses plain ints everywhere other than its stub, so switch the stub to match.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
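For context (illustrative only, not part of the patch): bool_t is the historical Xen char typedef, so it gets none of the value normalization that C99 _Bool guarantees, which is why boolean-returning functions are safer with a genuine bool:

    typedef char bool_t;      /* historical Xen typedef: no normalization */
    bool_t a = 0x100;         /* truncates to 0 - silently "false" */
    bool   b = 0x100;         /* _Bool normalizes any nonzero value to true */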
DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
-static int sh_enable_log_dirty(struct domain *, bool_t log_global);
+static int sh_enable_log_dirty(struct domain *, bool log_global);
static int sh_disable_log_dirty(struct domain *);
static void sh_clean_dirty_bitmap(struct domain *);
/* Shadow specific code which is called in paging_log_dirty_enable().
* Return 0 if no problem found.
*/
-static int sh_enable_log_dirty(struct domain *d, bool_t log_global)
+static int sh_enable_log_dirty(struct domain *d, bool log_global)
{
int ret;
/*
* Write a new value into the guest pagetable, and update the shadows
- * appropriately. Returns 0 if we page-faulted, 1 for success.
+ * appropriately. Returns false if we page-faulted, true for success.
*/
-static bool_t
+static bool
sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
{
#if CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS
return !failed;
#else
- return 0;
+ return false;
#endif
}
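As an illustration of the contract (a hypothetical caller, not taken from this patch), the boolean only reports whether we faulted on the guest pagetable:

    /* Sketch: a false return means we faulted and the write did not happen. */
    if ( !paging_write_guest_entry(v, p, new, gmfn) )
        return -EFAULT;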
/*
* Cmpxchg a new value into the guest pagetable, and update the shadows
- * appropriately. Returns 0 if we page-faulted, 1 if not.
+ * appropriately. Returns false if we page-faulted, true if not.
* N.B. caller should check the value of "old" to see if the cmpxchg itself
* was successful.
*/
-static bool_t
+static bool
sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
intpte_t new, mfn_t gmfn)
{
return !failed;
#else
- return 0;
+ return false;
#endif
}
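The N.B. above matters: the return value and the cmpxchg outcome are independent. A sketch of the expected calling pattern ('expected' and the error handling are hypothetical):

    intpte_t old = expected;

    if ( !paging_cmpxchg_guest_entry(v, p, &old, new, gmfn) )
        return -EFAULT;        /* page-faulted: nothing was written */
    if ( old != expected )
        rc = -ERESTART;        /* no fault, but the cmpxchg lost a race */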
gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
u32 pass_thru_flags;
u32 gflags, sflags;
- bool_t mmio_mfn;
+ bool mmio_mfn;
/* We don't shadow PAE l3s */
ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
/*
- * Called when the guest requests an invlpg. Returns 1 if the invlpg
- * instruction should be issued on the hardware, or 0 if it's safe not
+ * Called when the guest requests an invlpg. Returns true if the invlpg
+ * instruction should be issued on the hardware, or false if it's safe not
* to do so.
*/
-static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
+static bool sh_invlpg(struct vcpu *v, unsigned long va)
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
if ( !(shadow_l4e_get_flags(
sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
& _PAGE_PRESENT) )
- return 0;
+ return false;
/* This must still be a copy-from-user because we don't have the
* paging lock, and the higher-level shadows might disappear
* under our feet. */
sizeof (sl3e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
- return 0;
+ return false;
}
if ( !(shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
- return 0;
+ return false;
}
#else /* SHADOW_PAGING_LEVELS == 3 */
if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
& _PAGE_PRESENT) )
// no need to flush anything if there's no SL2...
- return 0;
+ return false;
#endif
/* This must still be a copy-from-user because we don't have the shadow
sizeof (sl2e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
- return 0;
+ return false;
}
// If there's nothing shadowed for this particular sl2e, then
// there is no need to do an invlpg, either...
//
if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
- return 0;
+ return false;
// Check to see if the SL2 is a splintered superpage...
// If so, then we'll need to flush the entire TLB (because that's
== SH_type_fl1_shadow )
{
flush_tlb_local();
- return 0;
+ return false;
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
{
perfc_incr(shadow_invlpg_fault);
paging_unlock(d);
- return 0;
+ return false;
}
if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
{
paging_unlock(d);
- return 0;
+ return false;
}
sl1mfn = shadow_l2e_get_mfn(sl2e);
}
paging_unlock(d);
/* Need the invlpg to pick up the disappearance of the sl1e */
- return 1;
+ return true;
}
}
#endif
- return 1;
+ return true;
}
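A sketch of how the result is meant to be consumed, assuming the generic paging_invlpg() wrapper dispatches here and that flush_tlb_one_local() does the hardware flush (both hedged as illustrative):

    /* Only touch the hardware TLB when the shadow code asks for it. */
    if ( paging_invlpg(v, va) )
        flush_tlb_one_local(va);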
#include <xen/mm.h>
#include <asm/shadow.h>
-static int _enable_log_dirty(struct domain *d, bool_t log_global)
+static int _enable_log_dirty(struct domain *d, bool log_global)
{
ASSERT(is_pv_domain(d));
return -EOPNOTSUPP;
return 0;
}
-static bool_t _invlpg(struct vcpu *v, unsigned long va)
+static bool _invlpg(struct vcpu *v, unsigned long va)
{
ASSERT_UNREACHABLE();
- return 1;
+ return true;
}
static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
pos ? (tmp = prev_pinned_shadow(pos, (dom)), 1) : 0; \
pos = tmp )
-/* Pin a shadow page: take an extra refcount, set the pin bit,
+/*
+ * Pin a shadow page: take an extra refcount, set the pin bit,
* and put the shadow at the head of the list of pinned shadows.
- * Returns 0 for failure, 1 for success. */
-static inline int sh_pin(struct domain *d, mfn_t smfn)
+ * Returns false for failure, true for success.
+ */
+static inline bool sh_pin(struct domain *d, mfn_t smfn)
{
struct page_info *sp[4];
struct page_list_head *pin_list;
unsigned int i, pages;
- bool_t already_pinned;
+ bool already_pinned;
ASSERT(mfn_valid(smfn));
sp[0] = mfn_to_page(smfn);
pin_list = &d->arch.paging.shadow.pinned_shadows;
if ( already_pinned && sp[0] == page_list_first(pin_list) )
- return 1;
+ return true;
/* Treat the up-to-four pages of the shadow as a unit in the list ops */
for ( i = 1; i < pages; i++ )
{
/* Not pinned: pin it! */
if ( !sh_get_ref(d, smfn, 0) )
- return 0;
+ return false;
sp[0]->u.sh.pinned = 1;
}
for ( i = pages; i > 0; i-- )
page_list_add(sp[i - 1], pin_list);
- return 1;
+ return true;
}
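Callers are expected to handle the failure case (no spare references). A minimal sketch, with the error handling assumed rather than taken from this patch:

    if ( !sh_pin(d, smfn) )
    {
        printk(XENLOG_ERR "Failed to pin shadow %"PRI_mfn"\n", mfn_x(smfn));
        domain_crash(d);
    }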
/* Unpin a shadow page: unset the pin bit, take the shadow off the list
unsigned long new,
unsigned int bytes,
struct sh_emulate_ctxt *sh_ctxt);
- bool_t (*write_guest_entry )(struct vcpu *v, intpte_t *p,
+ bool (*write_guest_entry )(struct vcpu *v, intpte_t *p,
intpte_t new, mfn_t gmfn);
- bool_t (*cmpxchg_guest_entry )(struct vcpu *v, intpte_t *p,
+ bool (*cmpxchg_guest_entry )(struct vcpu *v, intpte_t *p,
intpte_t *old, intpte_t new,
mfn_t gmfn);
mfn_t (*make_monitor_table )(struct vcpu *v);
struct paging_mode {
int (*page_fault )(struct vcpu *v, unsigned long va,
struct cpu_user_regs *regs);
- bool_t (*invlpg )(struct vcpu *v, unsigned long va);
+ bool (*invlpg )(struct vcpu *v, unsigned long va);
unsigned long (*gva_to_gfn )(struct vcpu *v,
struct p2m_domain *p2m,
unsigned long va,
}
-/* Write a new value into the guest pagetable, and update the
- * paging-assistance state appropriately. Returns 0 if we page-faulted,
- * 1 for success. */
-static inline bool_t paging_write_guest_entry(struct vcpu *v, intpte_t *p,
- intpte_t new, mfn_t gmfn)
+/*
+ * Write a new value into the guest pagetable, and update the
+ * paging-assistance state appropriately. Returns false if we page-faulted,
+ * true for success.
+ */
+static inline bool paging_write_guest_entry(
+ struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
{
#ifdef CONFIG_SHADOW_PAGING
if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
}
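For the non-shadow case, the wrapper's tail (elided above) falls back to a plain write; a minimal sketch of that shape, assuming write_atomic() is the mechanism:

    write_atomic(p, new);   /* no shadows to maintain: write the PTE directly */
    return true;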
-/* Cmpxchg a new value into the guest pagetable, and update the
- * paging-assistance state appropriately. Returns 0 if we page-faulted,
- * 1 if not. N.B. caller should check the value of "old" to see if the
- * cmpxchg itself was successful. */
-static inline bool_t paging_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
- intpte_t *old, intpte_t new,
- mfn_t gmfn)
+/*
+ * Cmpxchg a new value into the guest pagetable, and update the
+ * paging-assistance state appropriately. Returns false if we page-faulted,
+ * true if not. N.B. caller should check the value of "old" to see if the
+ * cmpxchg itself was successful.
+ */
+static inline bool paging_cmpxchg_guest_entry(
+ struct vcpu *v, intpte_t *p, intpte_t *old, intpte_t new, mfn_t gmfn)
{
#ifdef CONFIG_SHADOW_PAGING
if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
-                                     bool_t fast, bool_t all) {}
+                                     int fast, int all) {}
static inline void shadow_blow_tables_per_domain(struct domain *d) {}
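For reference, the second bullet of the commit message is about this stub: the non-stub sh_remove_shadows() is assumed here to be declared with plain ints, which the stub now mirrors:

    void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all);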