annotate xen/drivers/passthrough/vtd/qinval.c @ 19734:4fb8a6c993e2

VT-d: correct way to submit command to GCMD register

Per the VT-d spec, software should submit only one "incremental" command
at a time to the Global Command register. The current implementation uses a
variable (gcmd) to mirror the state of the Global Status register, which is
error prone. Instead, read the Global Status register and write it back to
the Global Command register with only the single command bit changed, as
sketched below.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 09:29:42 2009 +0100 (2009-06-05)
parents a69daf23602a
children cc07094a02e4
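
For illustration only (not part of the changeset): the "one incremental
command" pattern this patch adopts, used below in enable_qinval() and
disable_qinval(), re-reads the Global Status register and writes it back to
the Global Command register with a single command bit changed. The helper
name set_gcmd_bit is hypothetical; the accessors and register/bit names are
the ones already used in this file (via "iommu.h").

    /* Hypothetical helper sketching the pattern; not in the actual patch. */
    static void set_gcmd_bit(struct iommu *iommu, u32 bit, int enable)
    {
        u32 sts;

        /* Read the current hardware state rather than a cached gcmd copy. */
        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);

        /* Submit exactly one incremental command to the GCMD register. */
        if ( enable )
            dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | bit);
        else
            dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & ~bit);

        /* The caller then polls GSTS until the corresponding status bit
         * matches, e.g. via IOMMU_WAIT_OP() as done in enable_qinval(). */
    }

With this pattern, enable_qinval() below amounts to
set_gcmd_bit(iommu, DMA_GCMD_QIE, 1) followed by an IOMMU_WAIT_OP() poll
for DMA_GSTS_QIES, and disable_qinval() is the corresponding clear.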
rev   line source
keir@17099 1 /*
keir@17099 2 * Copyright (c) 2006, Intel Corporation.
keir@17099 3 *
keir@17099 4 * This program is free software; you can redistribute it and/or modify it
keir@17099 5 * under the terms and conditions of the GNU General Public License,
keir@17099 6 * version 2, as published by the Free Software Foundation.
keir@17099 7 *
keir@17099 8 * This program is distributed in the hope it will be useful, but WITHOUT
keir@17099 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
keir@17099 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
keir@17099 11 * more details.
keir@17099 12 *
keir@17099 13 * You should have received a copy of the GNU General Public License along with
keir@17099 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
keir@17099 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
keir@17099 16 *
keir@17099 17 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
keir@17099 18 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
keir@17099 19 */
keir@17099 20
keir@17099 21
keir@17099 22 #include <xen/sched.h>
keir@17212 23 #include <xen/iommu.h>
keir@17434 24 #include <xen/time.h>
keir@17443 25 #include <xen/pci.h>
keir@17540 26 #include <xen/pci_regs.h>
keir@17212 27 #include "iommu.h"
keir@17099 28 #include "dmar.h"
keir@17099 29 #include "vtd.h"
keir@17099 30 #include "extern.h"
keir@17099 31
keir@19673 32 int qinval_enabled;
keir@19673 33
keir@17099 34 static void print_qi_regs(struct iommu *iommu)
keir@17099 35 {
keir@17099 36 u64 val;
keir@17099 37
keir@17099 38 val = dmar_readq(iommu->reg, DMAR_IQA_REG);
keir@18834 39 printk("DMAR_IQA_REG = %"PRIx64"\n", val);
keir@17099 40
keir@17099 41 val = dmar_readq(iommu->reg, DMAR_IQH_REG);
keir@18834 42 printk("DMAR_IQH_REG = %"PRIx64"\n", val);
keir@17099 43
keir@17099 44 val = dmar_readq(iommu->reg, DMAR_IQT_REG);
keir@18834 45 printk("DMAR_IQT_REG = %"PRIx64"\n", val);
keir@17099 46 }
keir@17099 47
keir@17099 48 static int qinval_next_index(struct iommu *iommu)
keir@17099 49 {
keir@17099 50 u64 val;
keir@17099 51 val = dmar_readq(iommu->reg, DMAR_IQT_REG);
keir@17099 52 return (val >> 4);
keir@17099 53 }
keir@17099 54
keir@17099 55 static int qinval_update_qtail(struct iommu *iommu, int index)
keir@17099 56 {
keir@17099 57 u64 val;
keir@17099 58
keir@17099 59 /* Need an ASSERT to ensure that we hold the register lock */
keir@17099 60 val = (index < (QINVAL_ENTRY_NR-1)) ? (index + 1) : 0;
keir@17099 61 dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << 4));
keir@17099 62 return 0;
keir@17099 63 }
keir@17099 64
keir@17099 65 static int gen_cc_inv_dsc(struct iommu *iommu, int index,
keir@17099 66 u16 did, u16 source_id, u8 function_mask, u8 granu)
keir@17099 67 {
keir@17099 68 unsigned long flags;
keir@17432 69 struct qinval_entry *qinval_entry = NULL, *qinval_entries;
keir@17099 70 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 71
keir@17099 72 spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
keir@17432 73 qinval_entries =
keir@17432 74 (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
keir@17432 75 qinval_entry = &qinval_entries[index];
keir@17099 76 qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
keir@17099 77 qinval_entry->q.cc_inv_dsc.lo.granu = granu;
keir@17099 78 qinval_entry->q.cc_inv_dsc.lo.res_1 = 0;
keir@17099 79 qinval_entry->q.cc_inv_dsc.lo.did = did;
keir@17099 80 qinval_entry->q.cc_inv_dsc.lo.sid = source_id;
keir@17099 81 qinval_entry->q.cc_inv_dsc.lo.fm = function_mask;
keir@17099 82 qinval_entry->q.cc_inv_dsc.lo.res_2 = 0;
keir@17099 83 qinval_entry->q.cc_inv_dsc.hi.res = 0;
keir@17432 84
keir@17432 85 unmap_vtd_domain_page(qinval_entries);
keir@17099 86 spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
keir@17099 87
keir@17099 88 return 0;
keir@17099 89 }
keir@17099 90
keir@17099 91 int queue_invalidate_context(struct iommu *iommu,
keir@17099 92 u16 did, u16 source_id, u8 function_mask, u8 granu)
keir@17099 93 {
keir@17099 94 int ret = -1;
keir@17099 95 unsigned long flags;
keir@17099 96 int index = -1;
keir@17099 97
keir@17099 98 spin_lock_irqsave(&iommu->register_lock, flags);
keir@17099 99 index = qinval_next_index(iommu);
keir@17432 100 if ( index == -1 )
keir@17099 101 return -EBUSY;
keir@17099 102 ret = gen_cc_inv_dsc(iommu, index, did, source_id,
keir@17099 103 function_mask, granu);
keir@17099 104 ret |= qinval_update_qtail(iommu, index);
keir@17099 105 spin_unlock_irqrestore(&iommu->register_lock, flags);
keir@17099 106 return ret;
keir@17099 107 }
keir@17099 108
keir@17099 109 static int gen_iotlb_inv_dsc(struct iommu *iommu, int index,
keir@17099 110 u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
keir@17099 111 {
keir@17099 112 unsigned long flags;
keir@17432 113 struct qinval_entry *qinval_entry = NULL, *qinval_entries;
keir@17099 114 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 115
keir@17099 116 if ( index == -1 )
keir@17099 117 return -1;
keir@17099 118 spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
keir@17099 119
keir@17432 120 qinval_entries =
keir@17432 121 (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
keir@17432 122 qinval_entry = &qinval_entries[index];
keir@17099 123 qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
keir@17099 124 qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
keir@17099 125 qinval_entry->q.iotlb_inv_dsc.lo.dr = 0;
keir@17099 126 qinval_entry->q.iotlb_inv_dsc.lo.dw = 0;
keir@17099 127 qinval_entry->q.iotlb_inv_dsc.lo.res_1 = 0;
keir@17099 128 qinval_entry->q.iotlb_inv_dsc.lo.did = did;
keir@17099 129 qinval_entry->q.iotlb_inv_dsc.lo.res_2 = 0;
keir@17099 130
keir@17099 131 qinval_entry->q.iotlb_inv_dsc.hi.am = am;
keir@17099 132 qinval_entry->q.iotlb_inv_dsc.hi.ih = ih;
keir@17099 133 qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
keir@17099 134 qinval_entry->q.iotlb_inv_dsc.hi.addr = addr;
keir@17099 135
keir@17432 136 unmap_vtd_domain_page(qinval_entries);
keir@17099 137 spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
keir@17099 138 return 0;
keir@17099 139 }
keir@17099 140
keir@17099 141 int queue_invalidate_iotlb(struct iommu *iommu,
keir@17099 142 u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
keir@17099 143 {
keir@17099 144 int ret = -1;
keir@17099 145 unsigned long flags;
keir@17099 146 int index = -1;
keir@17099 147
keir@17099 148 spin_lock_irqsave(&iommu->register_lock, flags);
keir@17099 149
keir@17099 150 index = qinval_next_index(iommu);
keir@17099 151 ret = gen_iotlb_inv_dsc(iommu, index, granu, dr, dw, did,
keir@17099 152 am, ih, addr);
keir@17099 153 ret |= qinval_update_qtail(iommu, index);
keir@17099 154 spin_unlock_irqrestore(&iommu->register_lock, flags);
keir@17099 155 return ret;
keir@17099 156 }
keir@17099 157
keir@17099 158 static int gen_wait_dsc(struct iommu *iommu, int index,
keir@17099 159 u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
keir@17099 160 {
keir@17099 161 unsigned long flags;
keir@17432 162 struct qinval_entry *qinval_entry = NULL, *qinval_entries;
keir@17099 163 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 164
keir@17099 165 if ( index == -1 )
keir@17099 166 return -1;
keir@17099 167 spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
keir@17432 168 qinval_entries =
keir@17432 169 (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
keir@17432 170 qinval_entry = &qinval_entries[index];
keir@17099 171 qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
keir@17099 172 qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
keir@17099 173 qinval_entry->q.inv_wait_dsc.lo.sw = sw;
keir@17099 174 qinval_entry->q.inv_wait_dsc.lo.fn = fn;
keir@17099 175 qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
keir@17099 176 qinval_entry->q.inv_wait_dsc.lo.sdata = sdata;
keir@17099 177 qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
keir@17099 178 qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(saddr) >> 2;
keir@17432 179 unmap_vtd_domain_page(qinval_entries);
keir@17099 180 spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
keir@17099 181 return 0;
keir@17099 182 }
keir@17099 183
keir@17099 184 static int queue_invalidate_wait(struct iommu *iommu,
keir@17099 185 u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
keir@17099 186 {
keir@17099 187 unsigned long flags;
keir@17434 188 s_time_t start_time;
keir@17099 189 int index = -1;
keir@17099 190 int ret = -1;
keir@17099 191 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 192
keir@17099 193 spin_lock_irqsave(&qi_ctrl->qinval_poll_lock, flags);
keir@18018 194 spin_lock(&iommu->register_lock);
keir@17099 195 index = qinval_next_index(iommu);
keir@17432 196 if ( *saddr == 1 )
keir@17099 197 *saddr = 0;
keir@17099 198 ret = gen_wait_dsc(iommu, index, iflag, sw, fn, sdata, saddr);
keir@17099 199 ret |= qinval_update_qtail(iommu, index);
keir@18018 200 spin_unlock(&iommu->register_lock);
keir@17099 201
keir@17099 202 /* For now we don't support the interrupt method */
keir@17099 203 if ( sw )
keir@17099 204 {
keir@17099 205 /* All wait descriptors write the same data to the same address */
keir@17434 206 start_time = NOW();
keir@17432 207 while ( *saddr != 1 )
keir@17432 208 {
keir@17434 209 if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
keir@17432 210 {
keir@17099 211 print_qi_regs(iommu);
keir@17099 212 panic("queue invalidate wait descriptor was not executed\n");
keir@17099 213 }
keir@17099 214 cpu_relax();
keir@17099 215 }
keir@17099 216 }
keir@17099 217 spin_unlock_irqrestore(&qi_ctrl->qinval_poll_lock, flags);
keir@17099 218 return ret;
keir@17099 219 }
keir@17099 220
keir@17099 221 int invalidate_sync(struct iommu *iommu)
keir@17099 222 {
keir@17099 223 int ret = -1;
keir@17099 224 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 225
keir@17937 226 if ( qi_ctrl->qinval_maddr != 0 )
keir@17099 227 {
keir@17099 228 ret = queue_invalidate_wait(iommu,
keir@17099 229 0, 1, 1, 1, &qi_ctrl->qinval_poll_status);
keir@17099 230 return ret;
keir@17099 231 }
keir@17099 232 return 0;
keir@17099 233 }
keir@17099 234
keir@17099 235 static int gen_dev_iotlb_inv_dsc(struct iommu *iommu, int index,
keir@17099 236 u32 max_invs_pend, u16 sid, u16 size, u64 addr)
keir@17099 237 {
keir@17099 238 unsigned long flags;
keir@17432 239 struct qinval_entry *qinval_entry = NULL, *qinval_entries;
keir@17099 240 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 241
keir@17099 242 if ( index == -1 )
keir@17099 243 return -1;
keir@17099 244 spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
keir@17099 245
keir@17432 246 qinval_entries =
keir@17432 247 (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
keir@17432 248 qinval_entry = &qinval_entries[index];
keir@17099 249 qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
keir@17099 250 qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
keir@17099 251 qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = max_invs_pend;
keir@17099 252 qinval_entry->q.dev_iotlb_inv_dsc.lo.res_2 = 0;
keir@17099 253 qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = sid;
keir@17099 254 qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0;
keir@17099 255
keir@17099 256 qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size;
keir@18834 257 qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
keir@18834 258 qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
keir@17099 259
keir@17432 260 unmap_vtd_domain_page(qinval_entries);
keir@17099 261 spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
keir@17099 262 return 0;
keir@17099 263 }
keir@17099 264
keir@18834 265 int qinval_device_iotlb(struct iommu *iommu,
keir@17099 266 u32 max_invs_pend, u16 sid, u16 size, u64 addr)
keir@17099 267 {
keir@17099 268 int ret = -1;
keir@17099 269 unsigned long flags;
keir@17099 270 int index = -1;
keir@17099 271
keir@17099 272 spin_lock_irqsave(&iommu->register_lock, flags);
keir@17099 273 index = qinval_next_index(iommu);
keir@17099 274 ret = gen_dev_iotlb_inv_dsc(iommu, index, max_invs_pend,
keir@17099 275 sid, size, addr);
keir@17099 276 ret |= qinval_update_qtail(iommu, index);
keir@17099 277 spin_unlock_irqrestore(&iommu->register_lock, flags);
keir@17099 278 return ret;
keir@17099 279 }
keir@17099 280
keir@17099 281 static int gen_iec_inv_dsc(struct iommu *iommu, int index,
keir@17099 282 u8 granu, u8 im, u16 iidx)
keir@17099 283 {
keir@17099 284 unsigned long flags;
keir@17432 285 struct qinval_entry *qinval_entry = NULL, *qinval_entries;
keir@17099 286 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 287
keir@17099 288 if ( index == -1 )
keir@17099 289 return -1;
keir@17099 290 spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
keir@17099 291
keir@17432 292 qinval_entries =
keir@17432 293 (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
keir@17432 294 qinval_entry = &qinval_entries[index];
keir@17099 295 qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
keir@17099 296 qinval_entry->q.iec_inv_dsc.lo.granu = granu;
keir@17099 297 qinval_entry->q.iec_inv_dsc.lo.res_1 = 0;
keir@17099 298 qinval_entry->q.iec_inv_dsc.lo.im = im;
keir@17099 299 qinval_entry->q.iec_inv_dsc.lo.iidx = iidx;
keir@17099 300 qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
keir@17099 301 qinval_entry->q.iec_inv_dsc.hi.res = 0;
keir@17099 302
keir@17432 303 unmap_vtd_domain_page(qinval_entries);
keir@17099 304 spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
keir@17099 305 return 0;
keir@17099 306 }
keir@17099 307
keir@17099 308 int queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
keir@17099 309 {
keir@17099 310 int ret;
keir@17099 311 unsigned long flags;
keir@17099 312 int index = -1;
keir@17099 313
keir@17099 314 spin_lock_irqsave(&iommu->register_lock, flags);
keir@17099 315 index = qinval_next_index(iommu);
keir@17099 316 ret = gen_iec_inv_dsc(iommu, index, granu, im, iidx);
keir@17099 317 ret |= qinval_update_qtail(iommu, index);
keir@17099 318 spin_unlock_irqrestore(&iommu->register_lock, flags);
keir@17099 319 return ret;
keir@17099 320 }
keir@17099 321
keir@17099 322 int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
keir@17099 323 {
keir@17099 324 int ret;
keir@17099 325 ret = queue_invalidate_iec(iommu, granu, im, iidx);
keir@17099 326 ret |= invalidate_sync(iommu);
keir@17099 327
keir@17099 328 /*
keir@17099 329 * Reading a VT-d architectural register ensures that
keir@17099 330 * draining happens in an implementation-independent way.
keir@17099 331 */
keir@19420 332 (void)dmar_readq(iommu->reg, DMAR_CAP_REG);
keir@17099 333 return ret;
keir@17099 334 }
keir@17099 335
keir@17099 336 int iommu_flush_iec_global(struct iommu *iommu)
keir@17099 337 {
keir@17099 338 return __iommu_flush_iec(iommu, IEC_GLOBAL_INVL, 0, 0);
keir@17099 339 }
keir@17099 340
keir@17099 341 int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx)
keir@17099 342 {
keir@17099 343 return __iommu_flush_iec(iommu, IEC_INDEX_INVL, im, iidx);
keir@17099 344 }
keir@17099 345
keir@17099 346 static int flush_context_qi(
keir@17099 347 void *_iommu, u16 did, u16 sid, u8 fm, u64 type,
keir@19673 348 int flush_non_present_entry)
keir@17099 349 {
keir@17099 350 int ret = 0;
keir@17099 351 struct iommu *iommu = (struct iommu *)_iommu;
keir@17099 352 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 353
keir@17099 354 /*
keir@17099 355 * In the non-present entry flush case: if the hardware does not cache
keir@17099 356 * non-present entries, there is nothing to do; if it does, flush the
keir@17099 357 * entries of domain 0 (that domain id is used to cache any
keir@17099 358 * non-present entries).
keir@17099 359 */
keir@19673 360 if ( flush_non_present_entry )
keir@17099 361 {
keir@17099 362 if ( !cap_caching_mode(iommu->cap) )
keir@17099 363 return 1;
keir@17099 364 else
keir@17099 365 did = 0;
keir@17099 366 }
keir@17099 367
keir@17432 368 if ( qi_ctrl->qinval_maddr != 0 )
keir@17099 369 {
keir@17099 370 ret = queue_invalidate_context(iommu, did, sid, fm,
keir@17099 371 type >> DMA_CCMD_INVL_GRANU_OFFSET);
keir@17099 372 ret |= invalidate_sync(iommu);
keir@17099 373 }
keir@17099 374 return ret;
keir@17099 375 }
keir@17099 376
keir@17099 377 static int flush_iotlb_qi(
keir@17099 378 void *_iommu, u16 did,
keir@17099 379 u64 addr, unsigned int size_order, u64 type,
keir@19673 380 int flush_non_present_entry, int flush_dev_iotlb)
keir@17099 381 {
keir@17099 382 u8 dr = 0, dw = 0;
keir@17099 383 int ret = 0;
keir@17099 384 struct iommu *iommu = (struct iommu *)_iommu;
keir@17099 385 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 386
keir@17099 387 /*
keir@17099 388 * In the non-present entry flush case: if the hardware does not cache
keir@17099 389 * non-present entries, there is nothing to do; if it does, flush the
keir@17099 390 * entries of domain 0 (that domain id is used to cache any
keir@17099 391 * non-present entries).
keir@17099 392 */
keir@19673 393 if ( flush_non_present_entry )
keir@17099 394 {
keir@17099 395 if ( !cap_caching_mode(iommu->cap) )
keir@17099 396 return 1;
keir@17099 397 else
keir@17099 398 did = 0;
keir@17099 399 }
keir@17099 400
keir@17432 401 if ( qi_ctrl->qinval_maddr != 0 )
keir@17432 402 {
keir@17099 403 /* use queued invalidation */
keir@17099 404 if (cap_write_drain(iommu->cap))
keir@17099 405 dw = 1;
keir@17099 406 if (cap_read_drain(iommu->cap))
keir@17099 407 dr = 1;
keir@17099 408 /* Need to consider the ih bit later */
keir@17099 409 ret = queue_invalidate_iotlb(iommu,
keir@17099 410 (type >> DMA_TLB_FLUSH_GRANU_OFFSET), dr,
keir@17099 411 dw, did, (u8)size_order, 0, addr);
keir@19673 412 if ( flush_dev_iotlb )
keir@19673 413 ret |= dev_invalidate_iotlb(iommu, did, addr, size_order, type);
keir@17099 414 ret |= invalidate_sync(iommu);
keir@17099 415 }
keir@17099 416 return ret;
keir@17099 417 }
keir@17099 418
keir@19420 419 int enable_qinval(struct iommu *iommu)
keir@17099 420 {
keir@17099 421 struct qi_ctrl *qi_ctrl;
keir@17099 422 struct iommu_flush *flush;
keir@19733 423 u32 sts;
keir@17099 424
keir@17099 425 qi_ctrl = iommu_qi_ctrl(iommu);
keir@17099 426 flush = iommu_get_flush(iommu);
keir@17099 427
keir@19420 428 ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
keir@17099 429
keir@17432 430 if ( qi_ctrl->qinval_maddr == 0 )
keir@17432 431 {
keir@19187 432 qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);
keir@17432 433 if ( qi_ctrl->qinval_maddr == 0 )
keir@18663 434 {
keir@18663 435 dprintk(XENLOG_WARNING VTDPREFIX,
keir@18663 436 "Cannot allocate memory for qi_ctrl->qinval_maddr\n");
keir@18663 437 return -ENOMEM;
keir@18663 438 }
keir@17099 439 }
keir@17099 440
keir@19463 441 flush->context = flush_context_qi;
keir@19463 442 flush->iotlb = flush_iotlb_qi;
keir@19463 443
keir@17099 444 /* Set up the Invalidation Queue Address (IQA) register with the
keir@17099 445 * address of the page(s) we just allocated. The QS field at
keir@17099 446 * bits[2:0] indicates the size of the queue; one 4KB page
keir@17099 447 * holds 256 entries. The Queue Head (IQH) and Queue Tail (IQT)
keir@17099 448 * registers are automatically reset to 0 by a write
keir@17099 449 * to the IQA register.
keir@17099 450 */
keir@19228 451 qi_ctrl->qinval_maddr |= IQA_REG_QS;
keir@17432 452 dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
keir@17099 453
keir@19420 454 dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);
keir@19420 455
keir@17099 456 /* enable queued invalidation hardware */
keir@19734 457 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
keir@19734 458 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_QIE);
keir@17099 459
keir@17099 460 /* Make sure hardware complete it */
keir@19733 461 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
keir@19733 462 (sts & DMA_GSTS_QIES), sts);
keir@17937 463
keir@19673 464 qinval_enabled = 1;
keir@17937 465 return 0;
keir@17099 466 }
keir@19420 467
keir@19420 468 void disable_qinval(struct iommu *iommu)
keir@19420 469 {
keir@19733 470 u32 sts;
keir@19420 471
keir@19420 472 ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
keir@19420 473
keir@19734 474 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
keir@19734 475 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
keir@19420 476
keir@19420 477 /* Make sure hardware complete it */
keir@19733 478 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
keir@19733 479 !(sts & DMA_GSTS_QIES), sts);
keir@19420 480 }