ia64/xen-unstable: xen/drivers/passthrough/io.c @ 17868:42323a447cbe

vt-d: Quieten down overzealous logging.

Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Mon Jun 16 11:49:15 2008 +0100
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */

#include <xen/event.h>
#include <xen/iommu.h>
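
/*
 * Timer callback, armed by hvm_do_IRQ_dpci() for level-triggered lines.
 * If the guest has not EOIed a passed-through IRQ within PT_IRQ_TIME_OUT,
 * deassert every guest INTx bound to the machine GSI, clear the pending
 * state and EOI the machine IRQ so a shared line is not blocked forever.
 */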
static void pt_irq_time_out(void *data)
{
    struct hvm_mirq_dpci_mapping *irq_map = data;
    unsigned int guest_gsi, machine_gsi = 0;
    int vector;
    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
    struct dev_intx_gsi_link *digl;
    uint32_t device, intx;

    list_for_each_entry ( digl, &irq_map->digl_list, list )
    {
        guest_gsi = digl->gsi;
        machine_gsi = dpci->girq[guest_gsi].machine_gsi;
        device = digl->device;
        intx = digl->intx;
        hvm_pci_intx_deassert(irq_map->dom, device, intx);
    }

    clear_bit(machine_gsi, dpci->dirq_mask);
    vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
    stop_timer(&dpci->hvm_timer[vector]);
    spin_lock(&dpci->dirq_lock);
    dpci->mirq[machine_gsi].pending = 0;
    spin_unlock(&dpci->dirq_lock);
    pirq_guest_eoi(irq_map->dom, machine_gsi);
}
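
/*
 * Bind a machine IRQ (MSI vector or GSI) to a guest interrupt, allocating
 * the per-domain hvm_irq_dpci structure on first use. Reached via the
 * XEN_DOMCTL_bind_pt_irq hypercall.
 */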
int pt_irq_create_bind_vtd(
    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
    uint32_t machine_gsi, guest_gsi;
    uint32_t device, intx, link;
    struct dev_intx_gsi_link *digl;

    if ( hvm_irq_dpci == NULL )
    {
        hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
        if ( hvm_irq_dpci == NULL )
            return -ENOMEM;

        memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
        spin_lock_init(&hvm_irq_dpci->dirq_lock);
        for ( int i = 0; i < NR_IRQS; i++ )
            INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);

        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
        {
            /* Structure was not attached: free it and bail out rather
             * than keep using the dangling pointer. */
            xfree(hvm_irq_dpci);
            return -EINVAL;
        }
    }

    if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
    {
        int pirq = pt_irq_bind->machine_irq;

        hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID | HVM_IRQ_DPCI_MSI;
        hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
        hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;

        hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;

        pirq_guest_bind(d->vcpu[0], pirq, BIND_PIRQ__WILL_SHARE);
    }
    else
    {
        machine_gsi = pt_irq_bind->machine_irq;
        device = pt_irq_bind->u.pci.device;
        intx = pt_irq_bind->u.pci.intx;
        guest_gsi = hvm_pci_intx_gsi(device, intx);
        link = hvm_pci_intx_link(device, intx);
        hvm_irq_dpci->link_cnt[link]++;

        digl = xmalloc(struct dev_intx_gsi_link);
        if ( !digl )
            return -ENOMEM;

        digl->device = device;
        digl->intx = intx;
        digl->gsi = guest_gsi;
        digl->link = link;
        list_add_tail(&digl->list,
                      &hvm_irq_dpci->mirq[machine_gsi].digl_list);

        hvm_irq_dpci->girq[guest_gsi].valid = 1;
        hvm_irq_dpci->girq[guest_gsi].device = device;
        hvm_irq_dpci->girq[guest_gsi].intx = intx;
        hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;

        /* Bind each machine GSI at most once per domain. */
        if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
        {
            hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
            hvm_irq_dpci->mirq[machine_gsi].dom = d;

            init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
                       pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
            /* Deal with GSI for legacy devices. */
            pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
        }

        gdprintk(XENLOG_INFO VTDPREFIX,
                 "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
                 machine_gsi, device, intx);
    }
    return 0;
}
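
/*
 * Undo pt_irq_create_bind_vtd() for one device/INTx: drop the matching
 * dev_intx_gsi_link entry and, once no devices remain bound to the machine
 * GSI, unbind the physical IRQ and kill its timeout timer.
 */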
int pt_irq_destroy_bind_vtd(
    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
    uint32_t machine_gsi, guest_gsi;
    uint32_t device, intx, link;
    struct list_head *digl_list, *tmp;
    struct dev_intx_gsi_link *digl;

    if ( hvm_irq_dpci == NULL )
        return 0;

    machine_gsi = pt_irq_bind->machine_irq;
    device = pt_irq_bind->u.pci.device;
    intx = pt_irq_bind->u.pci.intx;
    guest_gsi = hvm_pci_intx_gsi(device, intx);
    link = hvm_pci_intx_link(device, intx);
    hvm_irq_dpci->link_cnt[link]--;

    gdprintk(XENLOG_INFO,
             "pt_irq_destroy_bind_vtd: machine_gsi=%u "
             "guest_gsi=%u, device=%u, intx=%u.\n",
             machine_gsi, guest_gsi, device, intx);
    memset(&hvm_irq_dpci->girq[guest_gsi], 0,
           sizeof(struct hvm_girq_dpci_mapping));

    /* Clear the mirq info. */
    if ( hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID )
    {
        list_for_each_safe ( digl_list, tmp,
                             &hvm_irq_dpci->mirq[machine_gsi].digl_list )
        {
            digl = list_entry(digl_list,
                              struct dev_intx_gsi_link, list);
            if ( digl->device == device &&
                 digl->intx == intx &&
                 digl->link == link &&
                 digl->gsi == guest_gsi )
            {
                list_del(&digl->list);
                xfree(digl);
            }
        }

        if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
        {
            pirq_guest_unbind(d, machine_gsi);
            kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
            hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
            hvm_irq_dpci->mirq[machine_gsi].flags = 0;
        }
    }

    gdprintk(XENLOG_INFO,
             "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
             machine_gsi, device, intx);

    return 0;
}
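
/*
 * Entry point for a physical interrupt destined for a pass-through device.
 * Marks the IRQ pending for the guest and kicks vcpu 0; returns 1 if the
 * IRQ was forwarded to a guest, 0 if it is not passed through to this
 * domain and should be handled normally.
 */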
int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);

    if ( !iommu_enabled || (d == dom0) || !dpci ||
         !(dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID) )
        return 0;

    /*
     * Set a timer here to avoid situations where the IRQ line is shared, and
     * the device belonging to the pass-through guest is not yet active. In
     * this case the guest may not pick up the interrupt (e.g., masked at the
     * PIC) and we need to detect that.
     */
    set_bit(mirq, dpci->dirq_mask);
    if ( !test_bit(_HVM_IRQ_DPCI_MSI, &dpci->mirq[mirq].flags) )
        set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
                  NOW() + PT_IRQ_TIME_OUT);
    vcpu_kick(d->vcpu[0]);

    return 1;
}
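
/*
 * Guest EOI of an MSI vector: look up the physical IRQ bound to the guest
 * vector and forward the EOI so the interrupt can be delivered again.
 */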
void hvm_dpci_msi_eoi(struct domain *d, int vector)
{
    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    int pirq;

    if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
        return;

    pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
    if ( (pirq >= 0) && (pirq < NR_PIRQS) &&
         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
        pirq_guest_eoi(d, pirq);
}
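
/*
 * Guest EOI of a vIOAPIC/ISA GSI: deassert the guest INTx line and, once no
 * interrupts remain pending on the machine GSI, stop the timeout timer and
 * EOI the physical IRQ (unless the redirection entry is now masked).
 */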
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
                  union vioapic_redir_entry *ent)
{
    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
    uint32_t device, intx, machine_gsi;

    if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
         (guest_gsi >= NR_ISAIRQS &&
          !hvm_irq_dpci->girq[guest_gsi].valid) )
        return;

    if ( guest_gsi < NR_ISAIRQS )
    {
        hvm_dpci_isairq_eoi(d, guest_gsi);
        return;
    }

    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
    device = hvm_irq_dpci->girq[guest_gsi].device;
    intx = hvm_irq_dpci->girq[guest_gsi].intx;
    hvm_pci_intx_deassert(d, device, intx);

    spin_lock(&hvm_irq_dpci->dirq_lock);
    if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
    {
        spin_unlock(&hvm_irq_dpci->dirq_lock);

        stop_timer(&hvm_irq_dpci->hvm_timer[
            domain_irq_to_vector(d, machine_gsi)]);
        if ( (ent == NULL) || !ent->fields.mask )
            pirq_guest_eoi(d, machine_gsi);
    }
    else
        spin_unlock(&hvm_irq_dpci->dirq_lock);
}