if ( !port_is_valid(ld, lport) )
return -EINVAL;
+ /*
+ * As the call further down needs to avoid allocations (due to running
+ * with IRQs off), give XSM a chance to pre-allocate if needed.
+ */
+ xsm_evtchn_send(XSM_HOOK, ld, NULL);
+
lchn = evtchn_from_port(ld, lport);
spin_lock_irqsave(&lchn->lock, flags);
int (*evtchn_interdomain) (struct domain *d1, struct evtchn *chn1,
struct domain *d2, struct evtchn *chn2);
void (*evtchn_close_post) (struct evtchn *chn);
+ /* Note: Next hook may be called with 'chn' set to NULL. See call site. */
int (*evtchn_send) (struct domain *d, struct evtchn *chn);
int (*evtchn_status) (struct domain *d, struct evtchn *chn);
int (*evtchn_reset) (struct domain *d1, struct domain *d2);
#include <xen/prefetch.h>
#include <xen/kernel.h>
#include <xen/sched.h>
+#include <xen/cpu.h>
#include <xen/init.h>
+#include <xen/percpu.h>
#include <xen/rcupdate.h>
#include <asm/atomic.h>
#include <asm/current.h>
return ecx;
}
+/*
+ * Allocate a zeroed AVC cache node and initialize its RCU head and hash
+ * list linkage, bumping the allocation statistics counter on success.
+ * Returns NULL if the allocator fails.  Uses xzalloc(), so callers must
+ * ensure IRQs are enabled (see the guard in avc_alloc_node()).
+ */
+static struct avc_node *new_node(void)
+{
+ struct avc_node *node = xzalloc(struct avc_node);
+
+ if ( node )
+ {
+ INIT_RCU_HEAD(&node->rhead);
+ INIT_HLIST_NODE(&node->list);
+ avc_cache_stats_incr(allocations);
+ }
+
+ return node;
+}
+
+/*
+ * avc_has_perm_noaudit() may consume up to two nodes, which we may not be
+ * able to obtain from the allocator at that point. Since this is merely
+ * about caching earlier decisions, allow for (just) one pre-allocated node.
+ */
+static DEFINE_PER_CPU(struct avc_node *, prealloc_node);
+
+/*
+ * Replenish this CPU's pre-allocated AVC node slot if it is currently
+ * empty, so a later avc_alloc_node() call can succeed without invoking
+ * the allocator.
+ */
+void avc_prealloc(void)
+{
+ struct avc_node **prealloc = &this_cpu(prealloc_node);
+
+ if ( !*prealloc )
+ *prealloc = new_node();
+}
+
+/*
+ * CPU hotplug notifier: free a dead CPU's pre-allocated node (and clear
+ * the slot) so the memory isn't held while the CPU remains offline.  The
+ * frees statistics counter is bumped to match the allocation accounting
+ * done in new_node().
+ */
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct avc_node **prealloc = &per_cpu(prealloc_node, cpu);
+
+ if ( action == CPU_DEAD && *prealloc )
+ {
+ xfree(*prealloc);
+ *prealloc = NULL;
+ avc_cache_stats_incr(frees);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* Notifier block wiring cpu_callback() into CPU hotplug notifications. */
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback,
+ .priority = 99
+};
+
+/* Register the hotplug notifier once at boot. */
+static int __init cpu_nfb_init(void)
+{
+ register_cpu_notifier(&cpu_nfb);
+ return 0;
+}
+__initcall(cpu_nfb_init);
+
static struct avc_node *avc_alloc_node(void)
{
- struct avc_node *node;
+ struct avc_node *node, **prealloc = &this_cpu(prealloc_node);
- node = xzalloc(struct avc_node);
- if (!node)
- goto out;
+ node = *prealloc;
+ *prealloc = NULL;
- INIT_RCU_HEAD(&node->rhead);
- INIT_HLIST_NODE(&node->list);
- avc_cache_stats_incr(allocations);
+ if ( !node )
+ {
+ /* Must not call xmalloc() & Co with IRQs off. */
+ if ( !local_irq_is_enabled() )
+ goto out;
+ node = new_node();
+ if ( !node )
+ goto out;
+ }
atomic_inc(&avc_cache.active_nodes);
if ( atomic_read(&avc_cache.active_nodes) > avc_cache_threshold )
int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested,
                 struct avc_audit_data *auditdata);
+/* Top up this CPU's pre-allocated AVC node; implemented in avc.c. */
+void avc_prealloc(void);
+
/* Exported to selinuxfs */
struct xen_flask_hash_stats;
int avc_get_hash_stats(struct xen_flask_hash_stats *arg);