ia64/xen-unstable

changeset 10156:7c406cefc1aa

[IA64] remove evtchn_ia64.c

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Thu May 25 15:38:16 2006 -0600 (2006-05-25)
parents ceaae8fbe3f7
children faae893d428e
files linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Wed May 24 16:44:20 2006 -0600
     1.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.3 @@ -1,261 +0,0 @@
     1.4 -/* NOTE: This file was split off from evtchn.c because there was
     1.5 -   some discussion that the mechanism is sufficiently different.
     1.6 -   It may be possible to merge it back in the future... djm */
     1.7 -#include <linux/config.h>
     1.8 -#include <linux/kernel.h>
     1.9 -#include <asm/hw_irq.h>
    1.10 -#include <xen/evtchn.h>
    1.11 -
    1.12 -#define MAX_EVTCHN 1024
    1.13 -
    1.14 -/* Xen will never allocate port zero for any purpose. */
    1.15 -#define VALID_EVTCHN(_chn) (((_chn) != 0) && ((_chn) < MAX_EVTCHN))
    1.16 -
    1.17 -/* Binding types. Note that only IRQT_VIRQ and IRQT_EVTCHN are currently
    1.18 - * supported for Xen/IA64. - ktian1
    1.19 - */
    1.20 -enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
    1.21 -
    1.22 -/* Constructor for packed IRQ information. */
    1.23 -#define mk_irq_info(type, index, evtchn)				\
    1.24 -	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
    1.25 -/* Convenient shorthand for packed representation of an unbound IRQ. */
    1.26 -#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
    1.27 -/* Accessor macros for packed IRQ information. */
    1.28 -#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
    1.29 -#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
    1.30 -#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))
    1.31 -
    1.32 -/* Packed IRQ information: binding type, sub-type index, and event channel. */
    1.33 -static u32 irq_info[NR_IRQS];
    1.34 -
    1.35 -/* Note for Xen/IA64: all event channels are bound to a single physical
    1.36 - * irq vector, so the event-channel number is used directly as the 'irq'
    1.37 - * number throughout this file. - ktian1
    1.38 - */
    1.39 -static struct {
    1.40 -	irqreturn_t (*handler)(int, void *, struct pt_regs *);
    1.41 -	void *dev_id;
    1.42 -	char opened;	/* Whether allocated */
    1.43 -} evtchns[MAX_EVTCHN];
    1.44 -
    1.45 -/*
    1.46 - * This lock protects updates to the following mapping and reference-count
    1.47 - * arrays. The lock does not need to be acquired to read the mapping tables.
    1.48 - */
    1.49 -static spinlock_t irq_mapping_update_lock;
    1.50 -
    1.51 -void mask_evtchn(int port)
    1.52 -{
    1.53 -	shared_info_t *s = HYPERVISOR_shared_info;
    1.54 -	synch_set_bit(port, &s->evtchn_mask[0]);
    1.55 -}
    1.56 -EXPORT_SYMBOL(mask_evtchn);
    1.57 -
    1.58 -void unmask_evtchn(int port)
    1.59 -{
    1.60 -	shared_info_t *s = HYPERVISOR_shared_info;
    1.61 -	unsigned int cpu = smp_processor_id();
    1.62 -	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
    1.63 -
    1.64 -#if 0	// FIXME: diverged from x86 evtchn.c
    1.65 -	/* Slow path (hypercall) if this is a non-local port. */
    1.66 -	if (unlikely(cpu != cpu_from_evtchn(port))) {
    1.67 -		struct evtchn_unmask op = { .port = port };
    1.68 -		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op);
    1.69 -		return;
    1.70 -	}
    1.71 -#endif
    1.72 -
    1.73 -	synch_clear_bit(port, &s->evtchn_mask[0]);
    1.74 -
    1.75 -	/*
    1.76 -	 * The following is basically the equivalent of 'hw_resend_irq'. Just
    1.77 -	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
    1.78 -	 * masked.
    1.79 -	 */
    1.80 -	if (synch_test_bit(port, &s->evtchn_pending[0]) && 
    1.81 -	    !synch_test_and_set_bit(port / BITS_PER_LONG,
    1.82 -				    &vcpu_info->evtchn_pending_sel)) {
    1.83 -		vcpu_info->evtchn_upcall_pending = 1;
    1.84 -		if (!vcpu_info->evtchn_upcall_mask)
    1.85 -			force_evtchn_callback();
    1.86 -	}
    1.87 -}
    1.88 -EXPORT_SYMBOL(unmask_evtchn);
    1.89 -
    1.90 -
    1.91 -#define unbound_irq(e) (VALID_EVTCHN(e) && (!evtchns[(e)].opened))
    1.92 -int bind_virq_to_irqhandler(
    1.93 -	unsigned int virq,
    1.94 -	unsigned int cpu,
    1.95 -	irqreturn_t (*handler)(int, void *, struct pt_regs *),
    1.96 -	unsigned long irqflags,
    1.97 -	const char *devname,
    1.98 -	void *dev_id)
    1.99 -{
   1.100 -    struct evtchn_bind_virq bind_virq;
   1.101 -    int evtchn;
   1.102 -
   1.103 -    spin_lock(&irq_mapping_update_lock);
   1.104 -
   1.105 -    bind_virq.virq = virq;
   1.106 -    bind_virq.vcpu = cpu;
   1.107 -    if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0)
   1.108 -        BUG();
   1.109 -    evtchn = bind_virq.port;
   1.110 -
   1.111 -    if (!unbound_irq(evtchn)) {
   1.112 -        evtchn = -EINVAL;
   1.113 -        goto out;
   1.114 -    }
   1.115 -
   1.116 -    evtchns[evtchn].handler = handler;
   1.117 -    evtchns[evtchn].dev_id = dev_id;
   1.118 -    evtchns[evtchn].opened = 1;
   1.119 -    irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
   1.120 -
   1.121 -    unmask_evtchn(evtchn);
   1.122 -out:
   1.123 -    spin_unlock(&irq_mapping_update_lock);
   1.124 -    return evtchn;
   1.125 -}
   1.126 -
   1.127 -int bind_evtchn_to_irqhandler(unsigned int evtchn,
   1.128 -                   irqreturn_t (*handler)(int, void *, struct pt_regs *),
   1.129 -                   unsigned long irqflags, const char * devname, void *dev_id)
   1.130 -{
   1.131 -    spin_lock(&irq_mapping_update_lock);
   1.132 -
   1.133 -    if (!unbound_irq(evtchn)) {
   1.134 -	evtchn = -EINVAL;
   1.135 -	goto out;
   1.136 -    }
   1.137 -
   1.138 -    evtchns[evtchn].handler = handler;
   1.139 -    evtchns[evtchn].dev_id = dev_id;
   1.140 -    evtchns[evtchn].opened = 1;
   1.141 -    irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
   1.142 -
   1.143 -    unmask_evtchn(evtchn);
   1.144 -out:
   1.145 -    spin_unlock(&irq_mapping_update_lock);
   1.146 -    return evtchn;
   1.147 -}
   1.148 -
   1.149 -int bind_ipi_to_irqhandler(
   1.150 -	unsigned int ipi,
   1.151 -	unsigned int cpu,
   1.152 -	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   1.153 -	unsigned long irqflags,
   1.154 -	const char *devname,
   1.155 -	void *dev_id)
   1.156 -{
   1.157 -    printk("%s called, but IPI binding is not supported yet\n", __FUNCTION__);
   1.158 -    while(1);
   1.159 -}
   1.160 -
   1.161 -void unbind_from_irqhandler(unsigned int irq, void *dev_id)
   1.162 -{
   1.163 -    struct evtchn_close close;
   1.164 -    int evtchn = evtchn_from_irq(irq);
   1.165 -
   1.166 -    spin_lock(&irq_mapping_update_lock);
   1.167 -
   1.168 -    if (unbound_irq(irq))
   1.169 -        goto out;
   1.170 -
   1.171 -    close.port = evtchn;
   1.172 -    if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
   1.173 -        BUG();
   1.174 -
   1.175 -    switch (type_from_irq(irq)) {
   1.176 -	case IRQT_VIRQ:
   1.177 -	    /* Add smp stuff later... */
   1.178 -	    break;
   1.179 -	case IRQT_IPI:
   1.180 -	    /* Add smp stuff later... */
   1.181 -	    break;
   1.182 -	default:
   1.183 -	    break;
   1.184 -    }
   1.185 -
   1.186 -    mask_evtchn(evtchn);
   1.187 -    evtchns[evtchn].handler = NULL;
   1.188 -    evtchns[evtchn].opened = 0;
   1.189 -
   1.190 -out:
   1.191 -    spin_unlock(&irq_mapping_update_lock);
   1.192 -}
   1.193 -
   1.194 -void notify_remote_via_irq(int irq)
   1.195 -{
   1.196 -	int evtchn = evtchn_from_irq(irq);
   1.197 -
   1.198 -	if (!unbound_irq(evtchn))
   1.199 -		notify_remote_via_evtchn(evtchn);
   1.200 -}
   1.201 -
   1.202 -irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
   1.203 -{
   1.204 -    unsigned long  l1, l2;
   1.205 -    unsigned int   l1i, l2i, port;
   1.206 -    irqreturn_t (*handler)(int, void *, struct pt_regs *);
   1.207 -    shared_info_t *s = HYPERVISOR_shared_info;
   1.208 -    vcpu_info_t   *vcpu_info = &s->vcpu_info[smp_processor_id()];
   1.209 -
   1.210 -    vcpu_info->evtchn_upcall_mask = 1;
   1.211 -    vcpu_info->evtchn_upcall_pending = 0;
   1.212 -
   1.213 -    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
   1.214 -    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
   1.215 -    while ( l1 != 0 )
   1.216 -    {
   1.217 -        l1i = __ffs(l1);
   1.218 -        l1 &= ~(1UL << l1i);
   1.219 -
   1.220 -        while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
   1.221 -        {
   1.222 -            l2i = __ffs(l2);
   1.223 -            l2 &= ~(1UL << l2i);
   1.224 -
   1.225 -            port = (l1i * BITS_PER_LONG) + l2i;
   1.226 -            if ( (handler = evtchns[port].handler) != NULL )
   1.227 -	    {
   1.228 -		clear_evtchn(port);
   1.229 -                handler(port, evtchns[port].dev_id, regs);
   1.230 -	    }
   1.231 -            else
   1.232 -	    {
   1.233 -                evtchn_device_upcall(port);
   1.234 -	    }
   1.235 -        }
   1.236 -    }
   1.237 -    vcpu_info->evtchn_upcall_mask = 0;
   1.238 -    return IRQ_HANDLED;
   1.239 -}
   1.240 -
   1.241 -void force_evtchn_callback(void)
   1.242 -{
   1.243 -	//(void)HYPERVISOR_xen_version(0, NULL);
   1.244 -}
   1.245 -
   1.246 -static struct irqaction evtchn_irqaction = {
   1.247 -	.handler =	evtchn_interrupt,
   1.248 -	.flags =	SA_INTERRUPT,
   1.249 -	.name =		"xen-event-channel"
   1.250 -};
   1.251 -
   1.252 -static int evtchn_irq = 0xe9;
   1.253 -void __init evtchn_init(void)
   1.254 -{
   1.255 -    shared_info_t *s = HYPERVISOR_shared_info;
   1.256 -
   1.257 -    register_percpu_irq(evtchn_irq, &evtchn_irqaction);
   1.258 -
   1.259 -    s->arch.evtchn_vector = evtchn_irq;
   1.260 -    printk("xen-event-channel using irq %d\n", evtchn_irq);
   1.261 -
   1.262 -    spin_lock_init(&irq_mapping_update_lock);
   1.263 -    memset(evtchns, 0, sizeof(evtchns));
   1.264 -}
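
For readers following the packed IRQ information scheme in the removed file (mk_irq_info plus the evtchn_from_irq/index_from_irq/type_from_irq accessors), the standalone sketch below illustrates the bit layout: binding type in bits 24-31, sub-type index in bits 16-23, event-channel number in bits 0-15. The *_from_info names are illustrative stand-ins for the array-indexed accessors in the file, not part of the original code.

#include <stdint.h>
#include <stdio.h>

/* Same packing as mk_irq_info() in the removed file. */
#define mk_irq_info(type, index, evtchn) \
        (((uint32_t)(type) << 24) | ((uint32_t)(index) << 16) | (uint32_t)(evtchn))

/* Illustrative accessors operating on a packed value rather than irq_info[]. */
#define evtchn_from_info(info) ((uint16_t)(info))
#define index_from_info(info)  ((uint8_t)((info) >> 16))
#define type_from_info(info)   ((uint8_t)((info) >> 24))

enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

int main(void)
{
        /* Pack a VIRQ binding with sub-type index 3 on event channel 5. */
        uint32_t info = mk_irq_info(IRQT_VIRQ, 3, 5);

        /* Prints "type=2 index=3 evtchn=5". */
        printf("type=%u index=%u evtchn=%u\n",
               type_from_info(info), index_from_info(info),
               evtchn_from_info(info));
        return 0;
}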
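The bind_virq_to_irqhandler()/unbind_from_irqhandler() pair removed above was the interface drivers used to receive virtual IRQs. A hypothetical caller might have looked like the sketch below; the handler, module init/exit names, and the choice of VIRQ_DEBUG are illustrative only, and the three-argument handler signature matches the pre-2.6.19 kernels this file targeted. On Xen/IA64 the return value is the event-channel number, which doubles as the 'irq' handle passed back to unbind_from_irqhandler().

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <xen/evtchn.h>

static int my_evtchn;   /* event channel returned by bind_virq_to_irqhandler() */

/* Illustrative handler using the old three-argument irqreturn_t signature. */
static irqreturn_t my_virq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        /* ... service the virtual interrupt ... */
        return IRQ_HANDLED;
}

static int __init my_driver_init(void)
{
        /* Bind on CPU 0; note the IA64 implementation ignores irqflags. */
        my_evtchn = bind_virq_to_irqhandler(VIRQ_DEBUG, 0, my_virq_handler,
                                            SA_INTERRUPT, "my-driver", NULL);
        if (my_evtchn < 0)
                return my_evtchn;   /* -EINVAL if the channel was already open */
        return 0;
}

static void __exit my_driver_exit(void)
{
        unbind_from_irqhandler(my_evtchn, NULL);
}

module_init(my_driver_init);
module_exit(my_driver_exit);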