memset(d->arch.pirq_irq, 0,
d->nr_pirqs * sizeof(*d->arch.pirq_irq));
- d->arch.irq_pirq = xmalloc_array(int, nr_irqs);
- if ( !d->arch.irq_pirq )
+ if ( (rc = init_domain_irq_mapping(d)) != 0 )
goto fail;
- memset(d->arch.irq_pirq, 0,
- nr_irqs * sizeof(*d->arch.irq_pirq));
-
- for ( i = 1; platform_legacy_irq(i); ++i )
- if ( !IO_APIC_IRQ(i) )
- d->arch.irq_pirq[i] = d->arch.pirq_irq[i] = i;
if ( is_hvm_domain(d) )
{
d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
- d->arch.emuirq_pirq = xmalloc_array(int, nr_irqs);
- if ( !d->arch.pirq_emuirq || !d->arch.emuirq_pirq )
+ if ( !d->arch.pirq_emuirq )
goto fail;
for (i = 0; i < d->nr_pirqs; i++)
d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
- for (i = 0; i < nr_irqs; i++)
- d->arch.emuirq_pirq[i] = IRQ_UNBOUND;
}
d->is_dying = DOMDYING_dead;
vmce_destroy_msr(d);
xfree(d->arch.pirq_irq);
- xfree(d->arch.irq_pirq);
xfree(d->arch.pirq_emuirq);
- xfree(d->arch.emuirq_pirq);
+ cleanup_domain_irq_mapping(d);
free_xenheap_page(d->shared_info);
if ( paging_initialised )
paging_final_teardown(d);
free_xenheap_page(d->shared_info);
xfree(d->arch.pirq_irq);
- xfree(d->arch.irq_pirq);
xfree(d->arch.pirq_emuirq);
- xfree(d->arch.emuirq_pirq);
+ cleanup_domain_irq_mapping(d);
}
unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
return desc;
}
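+/*
+ * Record a pirq <-> irq mapping: pirq_irq[] holds the pirq -> irq direction,
+ * the irq_pirq radix tree the irq -> pirq one. An existing tree entry for
+ * the irq is simply overwritten.
+ */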
+static int set_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+ int err = radix_tree_insert(&d->arch.irq_pirq, irq, (void *)(long)pirq,
+ NULL, NULL);
+
+ switch ( err )
+ {
+ case -EEXIST:
+ *radix_tree_lookup_slot(&d->arch.irq_pirq, irq) = (void *)(long)pirq;
+ /* fall through */
+ case 0:
+ d->arch.pirq_irq[pirq] = irq;
+ return 0;
+ }
+
+ return err;
+}
+
+static void clear_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+ d->arch.pirq_irq[pirq] = 0;
+ radix_tree_delete(&d->arch.irq_pirq, irq, NULL);
+}
+
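+/*
+ * Set up the per-domain radix tree(s) and pre-populate identity mappings
+ * for legacy IRQs not routed through the IO-APIC.
+ */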
+int init_domain_irq_mapping(struct domain *d)
+{
+ unsigned int i;
+ int err;
+
+ INIT_RADIX_TREE(&d->arch.irq_pirq, 0);
+ if ( is_hvm_domain(d) )
+ INIT_RADIX_TREE(&d->arch.hvm_domain.emuirq_pirq, 0);
+
+ for ( i = 1, err = 0; !err && platform_legacy_irq(i); ++i )
+ if ( !IO_APIC_IRQ(i) )
+ err = set_domain_irq_pirq(d, i, i);
+
+ return err;
+}
+
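+/*
+ * Radix tree slots here hold plain integers rather than pointers to
+ * allocated objects, so there is nothing to free per slot.
+ */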
+static void irq_slot_free(void *unused)
+{
+}
+
+void cleanup_domain_irq_mapping(struct domain *d)
+{
+ radix_tree_destroy(&d->arch.irq_pirq, irq_slot_free, NULL);
+ if ( is_hvm_domain(d) )
+ radix_tree_destroy(&d->arch.hvm_domain.emuirq_pirq,
+ irq_slot_free, NULL);
+}
+
/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void)
{
BUG_ON(irq <= 0);
desc = irq_to_desc(irq);
spin_lock_irq(&desc->lock);
- d->arch.pirq_irq[pirq] = d->arch.irq_pirq[irq] = 0;
+ clear_domain_irq_pirq(d, irq, pirq);
}
else
{
dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
d->domain_id, irq);
desc->handler = &pci_msi_type;
- d->arch.pirq_irq[pirq] = irq;
- d->arch.irq_pirq[irq] = pirq;
- setup_msi_irq(pdev, msi_desc, irq);
- spin_unlock_irqrestore(&desc->lock, flags);
- } else
+ ret = set_domain_irq_pirq(d, irq, pirq);
+ if ( !ret )
+ {
+ setup_msi_irq(pdev, msi_desc, irq);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+ else
+ {
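+ /* Mapping failed: restore the null handler and undo the MSI setup. */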
+ desc->handler = &no_irq_type;
+ spin_unlock_irqrestore(&desc->lock, flags);
+ pci_disable_msi(msi_desc);
+ }
+ }
+ else
{
spin_lock_irqsave(&desc->lock, flags);
- d->arch.pirq_irq[pirq] = irq;
- d->arch.irq_pirq[irq] = pirq;
+ ret = set_domain_irq_pirq(d, irq, pirq);
spin_unlock_irqrestore(&desc->lock, flags);
}
BUG_ON(irq != domain_pirq_to_irq(d, pirq));
if ( !forced_unbind )
- {
- d->arch.pirq_irq[pirq] = 0;
- d->arch.irq_pirq[irq] = 0;
- }
+ clear_domain_irq_pirq(d, irq, pirq);
else
{
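+ /* Keep a negated mapping so the association survives the forced unbind. */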
d->arch.pirq_irq[pirq] = -irq;
- d->arch.irq_pirq[irq] = -pirq;
+ *radix_tree_lookup_slot(&d->arch.irq_pirq, irq) = (void *)(long)-pirq;
}
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
- d->arch.pirq_emuirq[pirq] = emuirq;
/* do not store emuirq mappings for pt devices */
if ( emuirq != IRQ_PT )
- d->arch.emuirq_pirq[emuirq] = pirq;
+ {
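+ /*
+ * Bias the stored value by 1 so that pirq 0 remains distinguishable
+ * from an empty (NULL) radix tree slot.
+ */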
+ int err = radix_tree_insert(&d->arch.hvm_domain.emuirq_pirq, emuirq,
+ (void *)((long)pirq + 1), NULL, NULL);
+
+ switch ( err )
+ {
+ case 0:
+ break;
+ case -EEXIST:
+ *radix_tree_lookup_slot(&d->arch.hvm_domain.emuirq_pirq, emuirq) =
+ (void *)((long)pirq + 1);
+ break;
+ default:
+ return err;
+ }
+ }
+ d->arch.pirq_emuirq[pirq] = emuirq;
return 0;
}
d->arch.pirq_emuirq[pirq] = IRQ_UNBOUND;
if ( emuirq != IRQ_PT )
- d->arch.emuirq_pirq[emuirq] = IRQ_UNBOUND;
+ radix_tree_delete(&d->arch.hvm_domain.emuirq_pirq, emuirq, NULL);
done:
return ret;
* o tagging code removed
* o radix_tree_insert has func parameter for dynamic data struct allocation
* o radix_tree_destroy added (including recursive helper function)
- * o __init functions must be called explicitly
* o other include files adapted to Xen
*/
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/errno.h>
+#include <xen/xmalloc.h>
#include <xen/radix-tree.h>
#include <asm/cache.h>
return height_to_maxindex[height];
}
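+/* Default node allocator/deallocator, used when the caller passes NULL. */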
+static struct radix_tree_node *_node_alloc(void *unused)
+{
+ struct radix_tree_node *node = xmalloc(struct radix_tree_node);
+
+ return node ? memset(node, 0, sizeof(*node)) : node;
+}
+
+static void _node_free(struct radix_tree_node *node)
+{
+ xfree(node);
+}
+
/*
* Extend a radix tree so it can store key @index.
*/
int offset;
int error;
+ if (!node_alloc)
+ node_alloc = _node_alloc;
+
/* Make sure the tree is high enough. */
if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index, node_alloc, arg);
unsigned int height, shift;
int offset;
+ if (!node_free)
+ node_free = _node_free;
+
height = root->height;
if (index > radix_tree_maxindex(height))
goto out;
if (root->height == 0)
slot_free(root->rnode);
else {
+ if (!node_free)
+ node_free = _node_free;
radix_tree_node_destroy(root->rnode, root->height,
slot_free, node_free);
node_free(root->rnode);
return index;
}
-void __init radix_tree_init(void)
+static int __init radix_tree_init(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
height_to_maxindex[i] = __maxindex(i);
+
+ return 0;
}
+/* pre-SMP just so it runs before 'normal' initcalls */
+presmp_initcall(radix_tree_init);
if ( !tmh_enabled() )
return 0;
- radix_tree_init();
if ( tmh_dedup_enabled() )
for (i = 0; i < 256; i++ )
{
#include <xen/config.h>
#include <xen/mm.h>
+#include <xen/radix-tree.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>
const char *nested_p2m_function;
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
- int *irq_pirq;
+ struct radix_tree_root irq_pirq;
int *pirq_irq;
- /* pirq to emulated irq and vice versa */
- int *emuirq_pirq;
+ /* pirq to emulated irq */
int *pirq_emuirq;
/* Maximum physical-address bitwidth supported by this guest. */
/* VCPU which is current target for 8259 interrupts. */
struct vcpu *i8259_target;
+ /* emulated irq to pirq */
+ struct radix_tree_root emuirq_pirq;
+
/* hvm_print_line() logging. */
#define HVM_PBUF_SIZE 80
char *pbuf;
void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);
+int init_domain_irq_mapping(struct domain *);
+void cleanup_domain_irq_mapping(struct domain *);
+
#define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
-#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
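+/* Unmapped irqs read back as zero. */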
+#define domain_irq_to_pirq(d, irq) \
+ ((long)radix_tree_lookup(&(d)->arch.irq_pirq, irq))
#define PIRQ_ALLOCATED -1
#define domain_pirq_to_emuirq(d, pirq) ((d)->arch.pirq_emuirq[pirq])
-#define domain_emuirq_to_pirq(d, emuirq) ((d)->arch.emuirq_pirq[emuirq])
+#define domain_emuirq_to_pirq(d, emuirq) \
+ (((long)radix_tree_lookup(&(d)->arch.hvm_domain.emuirq_pirq, emuirq) ?: \
+ IRQ_UNBOUND + 1) - 1)
#define IRQ_UNBOUND -1
#define IRQ_PT -2
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items);
-void radix_tree_init(void);
#endif /* _XEN_RADIX_TREE_H */