struct mtx balloon_mutex;
/* We increase/decrease in batches which fit in a page */
-static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
+static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
struct balloon_stats {
/* We aim for 'current allocation' == 'target allocation'. */
static int
increase_reservation(unsigned long nr_pages)
{
- unsigned long pfn, i;
+ unsigned long i;
vm_page_t page;
long rc;
struct xen_memory_reservation reservation = {
TAILQ_REMOVE(&ballooned_pages, page, plinks.q);
bs.balloon_low--;
- pfn = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
KASSERT(xen_feature(XENFEAT_auto_translated_physmap),
("auto translated physmap but mapping is valid"));
static int
decrease_reservation(unsigned long nr_pages)
{
- unsigned long pfn, i;
+ unsigned long i;
vm_page_t page;
int need_sleep = 0;
int ret;
pmap_zero_page(page);
}
- pfn = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
- frame_list[i] = pfn;
+ frame_list[i] = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
TAILQ_INSERT_HEAD(&ballooned_pages, page, plinks.q);
bs.balloon_low++;
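The frame_list type change at the top of this balloon hunk is more than style: the array is handed straight to the hypervisor, which expects xen_pfn_t entries, and xen_pfn_t is not unsigned long on every architecture (on ARM it is a fixed 64-bit type). A minimal sketch of the batch-fill pattern, assuming <sys/param.h> and <vm/vm_page.h> are in scope; collect_frames and its page array are hypothetical stand-ins for the TAILQ walk in the real code:

/* One page's worth of frame numbers, sized by the type Xen expects. */
static xen_pfn_t frames[PAGE_SIZE / sizeof(xen_pfn_t)];

static unsigned long
collect_frames(vm_page_t *pages, unsigned long nr_pages)
{
	unsigned long i;

	if (nr_pages > nitems(frames))	/* never overrun a single batch */
		nr_pages = nitems(frames);
	for (i = 0; i < nr_pages; i++)
		frames[i] = VM_PAGE_TO_PHYS(pages[i]) >> PAGE_SHIFT;
	return (i);
}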
if (__predict_false(map->status != 0)) {
DPRINTF("invalid buffer -- could not remap "
"it (%d)\n", map->status);
- DPRINTF("Mapping(%d): Host Addr 0x%lx, flags "
+ DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags "
"0x%x ref 0x%x, dom %d\n", seg_idx,
map->host_addr, map->flags, map->ref,
map->dom);
{
device_set_desc(dev, "Xen Control Device");
- return (0);
+ return (BUS_PROBE_NOWILDCARD);
}
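Returning 0 from a probe method is BUS_PROBE_SPECIFIC, the highest success priority, so the driver could also claim devices found by wildcard probing; BUS_PROBE_NOWILDCARD still reports success, but only when the bus explicitly enumerated the device for this driver. A minimal probe sketch under those assumptions (xc_probe is a hypothetical name):

#include <sys/param.h>
#include <sys/bus.h>

static int
xc_probe(device_t dev)
{
	/* Succeed, but never claim a device found by wildcard probing. */
	device_set_desc(dev, "Xen Control Device");
	return (BUS_PROBE_NOWILDCARD);
}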
/**
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include "opt_pmap.h"
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/rman.h>
#include <machine/resource.h>
+#include <machine/cpu.h>
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
-#define cmpxchg(a, b, c) atomic_cmpset_int((volatile u_int *)(a),(b),(c))
-
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
if ( synch_cmpxchg(&shared[ref].flags, flags, 0) == flags )
return (0);
- cpu_relax();
+ cpu_spinwait();
}
/* If a transfer is in progress then wait until it is completed. */
while (!(flags & GTF_transfer_completed)) {
flags = shared[ref].flags;
- cpu_relax();
+ cpu_spinwait();
}
/* Read the frame number /after/ reading completion status. */
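cpu_spinwait() is FreeBSD's machine-independent spelling of the deleted Linux-style cpu_relax(); on x86 it expands to the PAUSE instruction, which eases pressure on the sibling hyperthread and the memory pipeline during busy-waits. A minimal sketch (wait_for_flag is hypothetical):

#include <sys/param.h>
#include <machine/cpu.h>	/* cpu_spinwait() */

static void
wait_for_flag(volatile int *flag)
{
	while (*flag == 0)
		cpu_spinwait();	/* PAUSE on x86 */
}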
if (entry->flags & GNTCOPY_dest_gref)
printf("gnttab dest ref=\t%u\n", entry->dest.u.ref);
else
- printf("gnttab dest gmfn=\t%lu\n", entry->dest.u.gmfn);
+ printf("gnttab dest gmfn=\t%"PRI_xen_pfn"\n",
+ entry->dest.u.gmfn);
printf("gnttab dest offset=\t%hu\n", entry->dest.offset);
printf("gnttab dest domid=\t%hu\n", entry->dest.domid);
if (entry->flags & GNTCOPY_source_gref)
printf("gnttab source ref=\t%u\n", entry->source.u.ref);
else
- printf("gnttab source gmfn=\t%lu\n", entry->source.u.gmfn);
+ printf("gnttab source gmfn=\t%"PRI_xen_pfn"\n",
+ entry->source.u.gmfn);
printf("gnttab source offset=\t%hu\n", entry->source.offset);
printf("gnttab source domid=\t%hu\n", entry->source.domid);
printf("gnttab len=\t%hu\n", entry->len);
int xn_if_flags;
struct callout xn_stat_ch;
- u_long rx_pfn_array[NET_RX_RING_SIZE];
+ xen_pfn_t rx_pfn_array[NET_RX_RING_SIZE];
struct ifmedia sc_media;
bool xn_resume;
#include <xen/features.h>
#include <xen/hypervisor.h>
#include <xen/hvm.h>
+#include <xen/xen_intr.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/xen/xenpci/xenpcivar.h>
-extern void xen_intr_handle_upcall(struct trapframe *trap_frame);
-
/*
* This is used to find our platform device instance.
*/
- * Unconditionally return success.
+ * Return success, but only for devices explicitly enumerated by
+ * the bus (BUS_PROBE_NOWILDCARD suppresses wildcard matches).
 */
device_set_desc(dev, "XenStore");
- return (0);
+ return (BUS_PROBE_NOWILDCARD);
}
static void
void lapic_handle_error(void);
void lapic_handle_intr(int vector, struct trapframe *frame);
void lapic_handle_timer(struct trapframe *frame);
-void xen_intr_handle_upcall(struct trapframe *frame);
void hv_vector_handler(struct trapframe *frame);
extern int x2apic_mode;
#ifndef _MACHINE_X86_XEN_XEN_OS_H_
#define _MACHINE_X86_XEN_XEN_OS_H_
-#ifdef PAE
-#define CONFIG_X86_PAE
-#endif
-
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
- __asm__ __volatile__ ( "rep;nop" : : : "memory" );
-}
-#define cpu_relax() rep_nop()
-
-/* This is a barrier for the compiler only, NOT the processor! */
-#define barrier() __asm__ __volatile__("": : :"memory")
-
-#define LOCK_PREFIX ""
-#define LOCK ""
-#define ADDR (*(volatile long *) addr)
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline int test_and_clear_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr) : "memory");
- return oldbit;
-}
-
-static __inline int constant_test_bit(int nr, const volatile void * addr)
-{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline int variable_test_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__(
- "btl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit)
- :"m" (ADDR),"Ir" (nr));
- return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* _MACHINE_X86_XEN_XEN_OS_H_ */
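Of the helpers deleted above, test_bit, set_bit, and clear_bit get native replacements later in this patch (xen_test_bit/xen_set_bit plus the bitset(9) macros); test_and_clear_bit is dropped without a replacement in the hunks shown. If that semantic were ever needed again, a sketch built on atomic_testandclear_long(9) might look like this (xen_test_and_clear_bit is hypothetical and not part of the patch):

#include <sys/limits.h>		/* LONG_BIT */
#include <machine/atomic.h>

static inline int
xen_test_and_clear_bit(int bit, volatile u_long *addr)
{
	/* atomic_testandclear_long() interprets the index modulo LONG_BIT. */
	return (atomic_testandclear_long(&addr[bit / LONG_BIT], bit));
}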
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
+/*
+ * The 2-level event channel ABI exposes LONG_BIT * LONG_BIT ports: the
+ * shared-info pending/mask arrays hold LONG_BIT words of LONG_BIT bits
+ * each.  Size the per-CPU enabled bitset to match; the old array of
+ * sizeof(u_long) * 8 u_longs had exactly this capacity.
+ */
+#define ENABLED_SETSIZE	(LONG_BIT * LONG_BIT)
+BITSET_DEFINE(enabledbits, ENABLED_SETSIZE);
+
/**
* Per-cpu event channel processing state.
*/
* A bitmap of ports that can be serviced from this CPU.
* A set bit means interrupt handling is enabled.
*/
- u_long evtchn_enabled[sizeof(u_long) * 8];
+ struct enabledbits evtchn_enabled;
};
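The new struct enabledbits relies on the bitset macros from <sys/bitset.h> (which the file must now include): BITSET_DEFINE(type, size) declares a struct wrapping enough longs to hold size bits, and the BIT_* macros used below operate on it, with the _ATOMIC variants safe against concurrent CPUs. A self-contained sketch with a hypothetical 128-bit set:

#include <sys/param.h>
#include <sys/bitset.h>

#define DEMO_SETSIZE	128			/* capacity in bits */
BITSET_DEFINE(demobits, DEMO_SETSIZE);		/* declares struct demobits */

static struct demobits demo;

static bool
demo_toggle(int bit)
{
	BIT_ZERO(DEMO_SETSIZE, &demo);		  /* clear every bit */
	BIT_SET_ATOMIC(DEMO_SETSIZE, bit, &demo); /* atomic set */
	return (BIT_ISSET(DEMO_SETSIZE, bit, &demo) != 0);
}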
/*
* Start the scan at port 0 by initializing the last scanned
* location as the highest numbered event channel port.
*/
-DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
+static DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
.last_processed_l1i = LONG_BIT - 1,
.last_processed_l2i = LONG_BIT - 1
};
struct xen_intr_pcpu_data *pcpu;
pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
- clear_bit(port, pcpu->evtchn_enabled);
+ BIT_CLR_ATOMIC(ENABLED_SETSIZE, port, &pcpu->evtchn_enabled);
}
/**
struct xen_intr_pcpu_data *pcpu;
pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
- set_bit(port, pcpu->evtchn_enabled);
+ BIT_SET_ATOMIC(ENABLED_SETSIZE, port, &pcpu->evtchn_enabled);
}
/**
{
return (sh->evtchn_pending[idx]
& ~sh->evtchn_mask[idx]
- & pcpu->evtchn_enabled[idx]);
+ & pcpu->evtchn_enabled.__bits[idx]);
}
/**
*/
CPU_FOREACH(i) {
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
- memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
- sizeof(pcpu->evtchn_enabled));
+ if (i == 0)
+ BIT_FILL(ENABLED_SETSIZE, &pcpu->evtchn_enabled);
+ else
+ BIT_ZERO(ENABLED_SETSIZE, &pcpu->evtchn_enabled);
xen_intr_intrcnt_add(i);
}
struct xen_intr_pcpu_data *pcpu;
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
- memset(pcpu->evtchn_enabled,
- i == 0 ? ~0 : 0, sizeof(pcpu->evtchn_enabled));
+
+ if (i == 0)
+ BIT_FILL(ENABLED_SETSIZE, &pcpu->evtchn_enabled);
+ else
+ BIT_ZERO(ENABLED_SETSIZE, &pcpu->evtchn_enabled);
}
/* Mask all event channels. */
isrc = (struct xenisrc *)base_isrc;
- if (test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
+ if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
struct physdev_eoi eoi = { .irq = isrc->xi_pirq };
error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
* Since the dynamic PIRQ EOI map is not available
* mark the PIRQ as needing EOI unconditionally.
*/
- set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
+ xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
}
}
db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
"NeedsEOI: %d\n",
isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
- !!test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
+ !!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
}
if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
db_printf("\tVirq: %d\n", isrc->xi_virq);
db_printf("\tMasked: %d Pending: %d\n",
- !!test_bit(isrc->xi_port, &s->evtchn_mask[0]),
- !!test_bit(isrc->xi_port, &s->evtchn_pending[0]));
+ !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
+ !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));
db_printf("\tPer-CPU Masks: ");
CPU_FOREACH(i) {
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
db_printf("cpu#%d: %d ", i,
- !!test_bit(isrc->xi_port, pcpu->evtchn_enabled));
+ BIT_ISSET(ENABLED_SETSIZE, isrc->xi_port,
+ &pcpu->evtchn_enabled));
}
db_printf("\n");
}
dst->handle = src->handle;
dst->id = src->id;
dst->sector_number = src->sector_number;
- barrier();
+ __compiler_membar();
if (n > dst->nr_segments)
n = dst->nr_segments;
for (i = 0; i < n; i++)
dst->handle = src->handle;
dst->id = src->id;
dst->sector_number = src->sector_number;
- barrier();
+ __compiler_membar();
if (n > dst->nr_segments)
n = dst->nr_segments;
for (i = 0; i < n; i++)
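barrier() was the Linux compiler-only fence; __compiler_membar() from <machine/atomic.h> is the native equivalent. It emits no instruction, but it stops the compiler from caching, sinking, or re-fetching memory accesses across it, which is exactly what the ring-copy code above needs: the shared request must not be re-read after its fields have been validated. A minimal sketch of that double-fetch defence (read_count_once is hypothetical):

#include <machine/atomic.h>	/* __compiler_membar() */

static int
read_count_once(const int *shared, int limit)
{
	int n;

	n = *shared;		/* single fetch of the untrusted count */
	__compiler_membar();	/* the compiler may not re-read *shared below */
	if (n > limit)
		n = limit;
	return (n);
}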
#ifndef __XEN_HYPERVISOR_H__
#define __XEN_HYPERVISOR_H__
-#ifdef XENHVM
-
-#define is_running_on_xen() (HYPERVISOR_shared_info != NULL)
-
-#else
-
-#define is_running_on_xen() 1
-
-#endif
-
-#ifdef PAE
-#ifndef CONFIG_X86_PAE
-#define CONFIG_X86_PAE
-#endif
-#endif
-
#include <sys/cdefs.h>
#include <sys/systm.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <machine/xen/hypercall.h>
-#if defined(__amd64__)
-#define MULTI_UVMFLAGS_INDEX 2
-#define MULTI_UVMDOMID_INDEX 3
-#else
-#define MULTI_UVMFLAGS_INDEX 3
-#define MULTI_UVMDOMID_INDEX 4
-#endif
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
-#else
-#define is_initial_xendomain() 0
-#endif
-
-extern start_info_t *xen_start_info;
-
extern uint64_t get_system_time(int ticks);
static inline int
return HYPERVISOR_console_io(CONSOLEIO_write, count, str);
}
-static inline void HYPERVISOR_crash(void) __dead2;
-
static inline int
HYPERVISOR_yield(void)
{
return (rc);
}
-static inline void
-MULTI_update_va_mapping(
- multicall_entry_t *mcl, unsigned long va,
- uint64_t new_val, unsigned long flags)
-{
- mcl->op = __HYPERVISOR_update_va_mapping;
- mcl->args[0] = va;
-#if defined(__amd64__)
- mcl->args[1] = new_val;
-#elif defined(PAE)
- mcl->args[1] = (uint32_t)(new_val & 0xffffffff) ;
- mcl->args[2] = (uint32_t)(new_val >> 32);
-#else
- mcl->args[1] = new_val;
- mcl->args[2] = 0;
-#endif
- mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
-}
-
#endif /* __XEN_HYPERVISOR_H__ */
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
-/* Force a proper event-channel callback from Xen. */
-void force_evtchn_callback(void);
-
extern shared_info_t *HYPERVISOR_shared_info;
extern start_info_t *HYPERVISOR_start_info;
(HYPERVISOR_start_info->flags & SIF_INITDOMAIN) != 0);
}
+/*
+ * Atomic bit helpers, based on ofed/include/linux/bitops.h.
+ *
+ * These helpers are prefixed with xen_ because xen-os.h is widely
+ * included and we don't want other drivers picking them up.
+ */
+#define NBPL (NBBY * sizeof(long))
+
+static inline bool
+xen_test_bit(int bit, volatile long *addr)
+{
+ unsigned long mask = 1UL << (bit % NBPL);
+
+	return (!!(atomic_load_acq_long(&addr[bit / NBPL]) & mask));
+}
+
+static inline void
+xen_set_bit(int bit, volatile long *addr)
+{
+ atomic_set_long(&addr[bit / NBPL], 1UL << (bit % NBPL));
+}
+
+#undef NBPL
+
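A short usage sketch for the helpers above, assuming kernel context with <sys/param.h> and <sys/systm.h> included (demo_map and demo_bits are hypothetical):

static long demo_map[4];	/* 4 * LONG_BIT bits, zero-initialized */

static void
demo_bits(void)
{
	xen_set_bit(42, demo_map);		/* atomic read-modify-write */
	KASSERT(xen_test_bit(42, demo_map),	/* load with acquire semantics */
	    ("bit 42 should be set"));
}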
/*
* Functions to allocate/free unused memory in order
* to map memory from other domains.
/** If non-zero, the hypervisor has been configured to use a direct vector */
extern int xen_vector_callback_enabled;
+void xen_intr_handle_upcall(struct trapframe *trap_frame);
+
/**
* Associate an already allocated local event channel port an interrupt
* handler.