ia64/xen-unstable
changeset 13374:5c5d9692f559
[HVM] Allow PV-on-HVM callback irq to be identified by PCI device.
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kfraser@localhost.localdomain
date      Wed Jan 10 15:05:00 2007 +0000 (2007-01-10)
parents   36fd53b2e3b4
children  160ff08f8b1f
files     unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
          unmodified_drivers/linux-2.6/platform-pci/platform-pci.h
          xen/arch/x86/hvm/hvm.c
          xen/arch/x86/hvm/irq.c
          xen/include/asm-x86/hvm/irq.h
          xen/include/public/hvm/params.h
line diff
--- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c  Wed Jan 10 14:14:30 2007 +0000
+++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c  Wed Jan 10 15:05:00 2007 +0000
@@ -179,7 +179,7 @@ static int get_hypercall_stubs(void)
 #define get_hypercall_stubs() (0)
 #endif
 
-static int get_callback_irq(struct pci_dev *pdev)
+static uint64_t get_callback_via(struct pci_dev *pdev)
 {
 #ifdef __ia64__
         int irq;
@@ -189,16 +189,24 @@ static int get_callback_irq(struct pci_d
         }
         return 0;
 #else /* !__ia64__ */
-        return pdev->irq;
+        if (pdev->irq < 16)
+                return pdev->irq; /* ISA IRQ */
+        /* We don't know the GSI. Specify the PCI INTx line instead. */
+        return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */
+                ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+                ((uint64_t)pdev->bus->number << 16) |
+                ((uint64_t)(pdev->devfn & 0xff) << 8) |
+                ((uint64_t)(pdev->pin - 1) & 3));
 #endif
 }
 
 static int __devinit platform_pci_init(struct pci_dev *pdev,
                                        const struct pci_device_id *ent)
 {
-        int i, ret, callback_irq;
+        int i, ret;
         long ioaddr, iolen;
         long mmio_addr, mmio_len;
+        uint64_t callback_via;
 
         i = pci_enable_device(pdev);
         if (i)
@@ -210,9 +218,9 @@ static int __devinit platform_pci_init(s
         mmio_addr = pci_resource_start(pdev, 1);
         mmio_len = pci_resource_len(pdev, 1);
 
-        callback_irq = get_callback_irq(pdev);
+        callback_via = get_callback_via(pdev);
 
-        if (mmio_addr == 0 || ioaddr == 0 || callback_irq == 0) {
+        if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) {
                 printk(KERN_WARNING DRV_NAME ":no resources found\n");
                 return -ENOENT;
         }
@@ -247,7 +255,7 @@ static int __devinit platform_pci_init(s
                 goto out;
         }
 
-        if ((ret = set_callback_irq(callback_irq)))
+        if ((ret = set_callback_via(callback_via)))
                 goto out;
 
 out:
@@ -297,7 +305,7 @@ static void __exit platform_pci_module_c
 {
         printk(KERN_INFO DRV_NAME ":Do platform module cleanup\n");
         /* disable hypervisor for callback irq */
-        set_callback_irq(0);
+        set_callback_via(0);
         if (pci_device_registered)
                 pci_unregister_driver(&platform_driver);
 }
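Note on the new encoding (not part of the changeset itself): get_callback_via() now reports either a legacy ISA IRQ (< 16) or, failing that, the device's own PCI INTx line packed into a 64-bit value with a type tag in bits 63:56. Below is a minimal stand-alone sketch of the same packing, using purely illustrative PCI coordinates (domain 0, bus 0, device 3, function 0, pin INTA) rather than values read from a real pci_dev:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative values only -- a real driver reads these from pci_dev. */
        uint16_t domain = 0;             /* PCI segment/domain */
        uint8_t  bus    = 0;             /* bus number */
        uint8_t  devfn  = (3 << 3) | 0;  /* device 3, function 0 */
        uint8_t  pin    = 1;             /* config-space pin: 1 == INTA */

        uint64_t via = ((uint64_t)0x01 << 56) |      /* type 1: PCI INTx */
                       ((uint64_t)domain << 32) |
                       ((uint64_t)bus    << 16) |
                       ((uint64_t)devfn  <<  8) |
                       ((uint64_t)(pin - 1) & 3);

        printf("callback via = 0x%016llx\n", (unsigned long long)via);
        /* Prints 0x0100000000001800 for these values. */
        return 0;
    }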
--- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h  Wed Jan 10 14:14:30 2007 +0000
+++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h  Wed Jan 10 15:05:00 2007 +0000
@@ -24,13 +24,13 @@
 #include <linux/interrupt.h>
 #include <xen/interface/hvm/params.h>
 
-static inline int set_callback_irq(int irq)
+static inline int set_callback_via(uint64_t via)
 {
         struct xen_hvm_param a;
 
         a.domid = DOMID_SELF;
         a.index = HVM_PARAM_CALLBACK_IRQ;
-        a.value = irq;
+        a.value = via;
         return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
 }
 
--- a/xen/arch/x86/hvm/hvm.c  Wed Jan 10 14:14:30 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c  Wed Jan 10 15:05:00 2007 +0000
@@ -800,7 +800,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
         d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
         break;
     case HVM_PARAM_CALLBACK_IRQ:
-        hvm_set_callback_gsi(d, a.value);
+        hvm_set_callback_via(d, a.value);
         break;
     }
     d->arch.hvm_domain.params[a.index] = a.value;
--- a/xen/arch/x86/hvm/irq.c  Wed Jan 10 14:14:30 2007 +0000
+++ b/xen/arch/x86/hvm/irq.c  Wed Jan 10 15:05:00 2007 +0000
@@ -25,7 +25,7 @@
 #include <xen/sched.h>
 #include <asm/hvm/domain.h>
 
-void hvm_pci_intx_assert(
+static void __hvm_pci_intx_assert(
     struct domain *d, unsigned int device, unsigned int intx)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
@@ -33,10 +33,8 @@ void hvm_pci_intx_assert(
 
     ASSERT((device <= 31) && (intx <= 3));
 
-    spin_lock(&hvm_irq->lock);
-
     if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx) )
-        goto out;
+        return;
 
     gsi = hvm_pci_intx_gsi(device, intx);
     if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
@@ -50,12 +48,19 @@ void hvm_pci_intx_assert(
         vioapic_irq_positive_edge(d, isa_irq);
         vpic_irq_positive_edge(d, isa_irq);
     }
+}
 
- out:
+void hvm_pci_intx_assert(
+    struct domain *d, unsigned int device, unsigned int intx)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+    spin_lock(&hvm_irq->lock);
+    __hvm_pci_intx_assert(d, device, intx);
     spin_unlock(&hvm_irq->lock);
 }
 
-void hvm_pci_intx_deassert(
+static void __hvm_pci_intx_deassert(
     struct domain *d, unsigned int device, unsigned int intx)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
@@ -63,10 +68,8 @@ void hvm_pci_intx_deassert(
 
     ASSERT((device <= 31) && (intx <= 3));
 
-    spin_lock(&hvm_irq->lock);
-
     if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx) )
-        goto out;
+        return;
 
     gsi = hvm_pci_intx_gsi(device, intx);
     --hvm_irq->gsi_assert_count[gsi];
@@ -76,8 +79,15 @@ void hvm_pci_intx_deassert(
     if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
          (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
         vpic_irq_negative_edge(d, isa_irq);
+}
 
- out:
+void hvm_pci_intx_deassert(
+    struct domain *d, unsigned int device, unsigned int intx)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+    spin_lock(&hvm_irq->lock);
+    __hvm_pci_intx_deassert(d, device, intx);
     spin_unlock(&hvm_irq->lock);
 }
 
@@ -123,37 +133,47 @@ void hvm_set_callback_irq_level(void)
     struct vcpu *v = current;
     struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
-    unsigned int gsi = hvm_irq->callback_gsi;
+    unsigned int gsi, pdev, pintx, asserted;
 
     /* Fast lock-free tests. */
-    if ( (v->vcpu_id != 0) || (gsi == 0) )
+    if ( (v->vcpu_id != 0) ||
+         (hvm_irq->callback_via_type == HVMIRQ_callback_none) )
         return;
 
     spin_lock(&hvm_irq->lock);
 
-    gsi = hvm_irq->callback_gsi;
-    if ( gsi == 0 )
+    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
+    asserted = !!vcpu_info(v, evtchn_upcall_pending);
+    if ( hvm_irq->callback_via_asserted == asserted )
         goto out;
+    hvm_irq->callback_via_asserted = asserted;
 
-    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
-    if ( vcpu_info(v, evtchn_upcall_pending) )
+    /* Callback status has changed. Update the callback via. */
+    switch ( hvm_irq->callback_via_type )
     {
-        if ( !__test_and_set_bit(0, &hvm_irq->callback_irq_wire) &&
-             (hvm_irq->gsi_assert_count[gsi]++ == 0) )
+    case HVMIRQ_callback_gsi:
+        gsi = hvm_irq->callback_via.gsi;
+        if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
         {
             vioapic_irq_positive_edge(d, gsi);
             if ( gsi <= 15 )
                 vpic_irq_positive_edge(d, gsi);
         }
-    }
-    else
-    {
-        if ( __test_and_clear_bit(0, &hvm_irq->callback_irq_wire) &&
-             (--hvm_irq->gsi_assert_count[gsi] == 0) )
+        else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) )
         {
             if ( gsi <= 15 )
                 vpic_irq_negative_edge(d, gsi);
         }
+        break;
+    case HVMIRQ_callback_pci_intx:
+        pdev = hvm_irq->callback_via.pci.dev;
+        pintx = hvm_irq->callback_via.pci.intx;
+        if ( asserted )
+            __hvm_pci_intx_assert(d, pdev, pintx);
+        else
+            __hvm_pci_intx_deassert(d, pdev, pintx);
+    default:
+        break;
     }
 
  out:
@@ -193,40 +213,79 @@ void hvm_set_pci_link_route(struct domai
         d->domain_id, link, old_isa_irq, isa_irq);
 }
 
-void hvm_set_callback_gsi(struct domain *d, unsigned int gsi)
+void hvm_set_callback_via(struct domain *d, uint64_t via)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
-    unsigned int old_gsi;
+    unsigned int gsi=0, pdev=0, pintx=0;
+    uint8_t via_type;
 
-    if ( gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count) )
-        gsi = 0;
+    via_type = (uint8_t)(via >> 56) + 1;
+    if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
+         (via_type > HVMIRQ_callback_pci_intx) )
+        via_type = HVMIRQ_callback_none;
 
     spin_lock(&hvm_irq->lock);
 
-    old_gsi = hvm_irq->callback_gsi;
-    if ( old_gsi == gsi )
-        goto out;
-    hvm_irq->callback_gsi = gsi;
-
-    if ( !test_bit(0, &hvm_irq->callback_irq_wire) )
-        goto out;
-
-    if ( old_gsi && (--hvm_irq->gsi_assert_count[old_gsi] == 0) )
-        if ( old_gsi <= 15 )
-            vpic_irq_negative_edge(d, old_gsi);
-
-    if ( gsi && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
+    /* Tear down old callback via. */
+    if ( hvm_irq->callback_via_asserted )
     {
-        vioapic_irq_positive_edge(d, gsi);
-        if ( gsi <= 15 )
-            vpic_irq_positive_edge(d, gsi);
+        switch ( hvm_irq->callback_via_type )
+        {
+        case HVMIRQ_callback_gsi:
+            gsi = hvm_irq->callback_via.gsi;
+            if ( (--hvm_irq->gsi_assert_count[gsi] == 0) && (gsi <= 15) )
+                vpic_irq_negative_edge(d, gsi);
+            break;
+        case HVMIRQ_callback_pci_intx:
+            pdev = hvm_irq->callback_via.pci.dev;
+            pintx = hvm_irq->callback_via.pci.intx;
+            __hvm_pci_intx_deassert(d, pdev, pintx);
+            break;
+        default:
+            break;
+        }
     }
 
- out:
+    /* Set up new callback via. */
+    switch ( hvm_irq->callback_via_type = via_type )
+    {
+    case HVMIRQ_callback_gsi:
+        gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
+        if ( (gsi == 0) || (gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count)) )
+            hvm_irq->callback_via_type = HVMIRQ_callback_none;
+        else if ( hvm_irq->callback_via_asserted &&
+                  (hvm_irq->gsi_assert_count[gsi]++ == 0) )
+        {
+            vioapic_irq_positive_edge(d, gsi);
+            if ( gsi <= 15 )
+                vpic_irq_positive_edge(d, gsi);
+        }
+        break;
+    case HVMIRQ_callback_pci_intx:
+        pdev = hvm_irq->callback_via.pci.dev = (uint8_t)(via >> 11) & 31;
+        pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
+        if ( hvm_irq->callback_via_asserted )
+            __hvm_pci_intx_assert(d, pdev, pintx);
+        break;
+    default:
+        break;
+    }
+
     spin_unlock(&hvm_irq->lock);
 
-    dprintk(XENLOG_G_INFO, "Dom%u callback GSI changed %u -> %u\n",
-            d->domain_id, old_gsi, gsi);
+    dprintk(XENLOG_G_INFO, "Dom%u callback via changed to ", d->domain_id);
+    switch ( via_type )
+    {
+    case HVMIRQ_callback_gsi:
+        printk("GSI %u\n", gsi);
+        break;
+    case HVMIRQ_callback_pci_intx:
+        printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
        break;
+    default:
+        printk("None\n");
+        break;
+    }
 }
 
 int cpu_has_pending_irq(struct vcpu *v)
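Aside (not part of the changeset): the refactoring above follows a common lock-split pattern. The public functions take hvm_irq->lock and delegate to __-prefixed helpers that assume the lock is already held, so hvm_set_callback_irq_level() and hvm_set_callback_via() can reuse those helpers under their own locking without double-acquiring the lock. A generic, self-contained sketch of that pattern, with illustrative names and POSIX threads standing in for Xen's spinlocks:

    #include <pthread.h>
    #include <stdio.h>

    struct counter {
        pthread_mutex_t lock;
        unsigned int value;
    };

    /* Caller must hold c->lock; does the real work (cf. __hvm_pci_intx_assert). */
    static void __counter_bump(struct counter *c)
    {
        c->value++;
    }

    /* Public entry point: takes the lock, then defers to the unlocked helper. */
    static void counter_bump(struct counter *c)
    {
        pthread_mutex_lock(&c->lock);
        __counter_bump(c);
        pthread_mutex_unlock(&c->lock);
    }

    /* A second path that already holds the lock reuses the helper directly,
       rather than calling counter_bump() and deadlocking on the same lock. */
    static void counter_bump_twice(struct counter *c)
    {
        pthread_mutex_lock(&c->lock);
        __counter_bump(c);
        __counter_bump(c);
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct counter c = { PTHREAD_MUTEX_INITIALIZER, 0 };
        counter_bump(&c);
        counter_bump_twice(&c);
        printf("value = %u\n", c.value);  /* prints 3 */
        return 0;
    }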
--- a/xen/include/asm-x86/hvm/irq.h  Wed Jan 10 14:14:30 2007 +0000
+++ b/xen/include/asm-x86/hvm/irq.h  Wed Jan 10 15:05:00 2007 +0000
@@ -43,9 +43,17 @@ struct hvm_irq {
      */
     DECLARE_BITMAP(isa_irq, 16);
 
-    /* Virtual interrupt wire and GSI link for paravirtual platform driver. */
-    DECLARE_BITMAP(callback_irq_wire, 1);
-    unsigned int callback_gsi;
+    /* Virtual interrupt and via-link for paravirtual platform driver. */
+    unsigned int callback_via_asserted;
+    enum {
+        HVMIRQ_callback_none,
+        HVMIRQ_callback_gsi,
+        HVMIRQ_callback_pci_intx
+    } callback_via_type;
+    union {
+        unsigned int gsi;
+        struct { uint8_t dev, intx; } pci;
+    } callback_via;
 
     /*
      * PCI-ISA interrupt router.
@@ -105,7 +113,7 @@ void hvm_isa_irq_deassert(
 void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
 
 void hvm_set_callback_irq_level(void);
-void hvm_set_callback_gsi(struct domain *d, unsigned int gsi);
+void hvm_set_callback_via(struct domain *d, uint64_t via);
 
 int cpu_get_interrupt(struct vcpu *v, int *type);
 int cpu_has_pending_irq(struct vcpu *v);
--- a/xen/include/public/hvm/params.h  Wed Jan 10 14:14:30 2007 +0000
+++ b/xen/include/public/hvm/params.h  Wed Jan 10 15:05:00 2007 +0000
@@ -24,13 +24,33 @@
 
 #include "hvm_op.h"
 
-/* Parameter space for HVMOP_{set,get}_param. */
+/*
+ * Parameter space for HVMOP_{set,get}_param.
+ */
+
+/*
+ * How should CPU0 event-channel notifications be delivered?
+ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
+ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
+ *                  Domain = val[47:32], Bus  = val[31:16],
+ *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
+ * If val == 0 then CPU0 event-channel notifications are not delivered.
+ */
 #define HVM_PARAM_CALLBACK_IRQ 0
+
+/*
+ * These are not used by Xen. They are here for convenience of HVM-guest
+ * xenbus implementations.
+ */
 #define HVM_PARAM_STORE_PFN 1
 #define HVM_PARAM_STORE_EVTCHN 2
+
 #define HVM_PARAM_PAE_ENABLED 4
+
 #define HVM_PARAM_IOREQ_PFN 5
+
 #define HVM_PARAM_BUFIOREQ_PFN 6
+
 #define HVM_NR_PARAMS 7
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
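Worked example (not part of the changeset): decoding an HVM_PARAM_CALLBACK_IRQ value according to the layout documented in the comment above. The sample value is hypothetical and matches the encoding sketch shown earlier.

    #include <stdint.h>
    #include <stdio.h>

    static void decode_callback_via(uint64_t val)
    {
        if (val == 0) {
            printf("CPU0 event-channel notifications not delivered\n");
            return;
        }

        switch (val >> 56) {
        case 0:  /* val[55:0] is a GSI */
            printf("GSI %llu\n",
                   (unsigned long long)(val & ((1ULL << 56) - 1)));
            break;
        case 1: {  /* val[55:0] is a PCI INTx line */
            unsigned int domain = (val >> 32) & 0xffff;
            unsigned int bus    = (val >> 16) & 0xffff;
            unsigned int devfn  = (val >>  8) & 0xff;
            unsigned int intx   =  val        & 0x3;
            printf("PCI INT%c on %04x:%02x:%02x.%u\n", 'A' + intx,
                   domain, bus, devfn >> 3, devfn & 7);
            break;
        }
        default:
            printf("unknown delivery type\n");
            break;
        }
    }

    int main(void)
    {
        decode_callback_via(0x0100000000001800ULL); /* -> PCI INTA on 0000:00:03.0 */
        return 0;
    }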