ia64/xen-unstable

view xen/drivers/passthrough/amd/iommu_map.c @ 19139:2d70ad9c3bc7

amd-iommu: obtain page_alloc_lock before traversing a domain's page list

As far as I can tell, this doesn't violate lock ordering, since other
places already call heap allocation functions from inside hd->mapping_lock.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 11:13:06 2009 +0000 (2009-01-30)
parents 5848b49b74fc
children 102576868e8d
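
The lock nesting this change relies on, as implemented in amd_iommu_sync_p2m() below, can be condensed into the following sketch. It is illustrative only: sync_p2m_sketch() is a made-up name, the per-page mapping work is elided, and only identifiers already used in this file appear. hd->mapping_lock is taken as the outer lock and d->page_alloc_lock is nested inside it for the duration of the page-list walk, which is the ordering the description above argues is consistent with callers that allocate page-table memory while holding mapping_lock.

/* Sketch only: illustrates the lock nesting, not the real function body. */
static int sync_p2m_sketch(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct page_info *page;
    unsigned long flags;

    spin_lock_irqsave(&hd->mapping_lock, flags); /* outer: IO page tables */
    spin_lock(&d->page_alloc_lock);              /* inner: guards d->page_list */

    page_list_for_each ( page, &d->page_list )
    {
        /* ... look up the gfn and map it into the IO page tables ... */
    }

    spin_unlock(&d->page_alloc_lock);
    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    return 0;
}
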
line source
/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/sched.h>
#include <xen/hvm/iommu.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>

long amd_iommu_poll_comp_wait = COMPLETION_WAIT_DEFAULT_POLLING_COUNT;

static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
{
    u32 tail, head, *cmd_buffer;
    int i;

    tail = iommu->cmd_buffer_tail;
    if ( ++tail == iommu->cmd_buffer.entries )
        tail = 0;
    head = get_field_from_reg_u32(
        readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET),
        IOMMU_CMD_BUFFER_HEAD_MASK,
        IOMMU_CMD_BUFFER_HEAD_SHIFT);
    /* ring not full: copy the command in at the current tail */
    if ( head != tail )
    {
        cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
                             (iommu->cmd_buffer_tail *
                              IOMMU_CMD_BUFFER_ENTRY_SIZE));
        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
            cmd_buffer[i] = cmd[i];

        iommu->cmd_buffer_tail = tail;
        return 1;
    }

    return 0;
}

static void commit_iommu_command_buffer(struct amd_iommu *iommu)
{
    u32 tail;

    set_field_in_reg_u32(iommu->cmd_buffer_tail, 0,
                         IOMMU_CMD_BUFFER_TAIL_MASK,
                         IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
    writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
}

int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
{
    if ( queue_iommu_command(iommu, cmd) )
    {
        commit_iommu_command_buffer(iommu);
        return 1;
    }

    return 0;
}

static void invalidate_iommu_page(struct amd_iommu *iommu,
                                  u64 io_addr, u16 domain_id)
{
    u64 addr_lo, addr_hi;
    u32 cmd[4], entry;

    addr_lo = io_addr & DMA_32BIT_MASK;
    addr_hi = io_addr >> 32;

    set_field_in_reg_u32(domain_id, 0,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;

    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, 0,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
    cmd[2] = entry;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
    cmd[3] = entry;

    cmd[0] = 0;
    send_iommu_command(iommu, cmd);
}

void flush_command_buffer(struct amd_iommu *iommu)
{
    u32 cmd[4], status;
    int loop_count, comp_wait;

    /* clear 'ComWaitInt' in status register (WIC) */
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         IOMMU_STATUS_COMP_WAIT_INT_MASK,
                         IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
    writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);

    /* send an empty COMPLETION_WAIT command to flush command buffer */
    cmd[3] = cmd[2] = 0;
    set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
                         IOMMU_CMD_OPCODE_MASK,
                         IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         IOMMU_COMP_WAIT_I_FLAG_MASK,
                         IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
    send_iommu_command(iommu, cmd);

    /* wait for 'ComWaitInt' to signal completion */
    if ( amd_iommu_poll_comp_wait )
    {
        loop_count = amd_iommu_poll_comp_wait;
        do {
            status = readl(iommu->mmio_base +
                           IOMMU_STATUS_MMIO_OFFSET);
            comp_wait = get_field_from_reg_u32(
                status,
                IOMMU_STATUS_COMP_WAIT_INT_MASK,
                IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
            --loop_count;
        } while ( loop_count && !comp_wait );

        if ( comp_wait )
        {
            /* clear 'ComWaitInt' in status register (WIC) */
            status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
            writel(status, iommu->mmio_base +
                   IOMMU_STATUS_MMIO_OFFSET);
        }
        else
        {
            amd_iov_warning("Warning: ComWaitInt bit did not assert!\n");
        }
    }
}

static void clear_iommu_l1e_present(u64 l2e, unsigned long gfn)
{
    u32 *l1e;
    int offset;
    void *l1_table;

    l1_table = map_domain_page(l2e >> PAGE_SHIFT);

    offset = gfn & (~PTE_PER_TABLE_MASK);
    l1e = (u32*)(l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));

    /* clear l1 entry */
    l1e[0] = l1e[1] = 0;

    unmap_domain_page(l1_table);
}

static void set_iommu_l1e_present(u64 l2e, unsigned long gfn,
                                  u64 maddr, int iw, int ir)
{
    u64 addr_lo, addr_hi;
    u32 entry;
    void *l1_table;
    int offset;
    u32 *l1e;

    l1_table = map_domain_page(l2e >> PAGE_SHIFT);

    offset = gfn & (~PTE_PER_TABLE_MASK);
    l1e = (u32*)((u8*)l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));

    addr_lo = maddr & DMA_32BIT_MASK;
    addr_hi = maddr >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_PTE_ADDR_HIGH_MASK,
                         IOMMU_PTE_ADDR_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(iw ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_PTE_IO_WRITE_PERMISSION_MASK,
                         IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT, &entry);
    set_field_in_reg_u32(ir ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_PTE_IO_READ_PERMISSION_MASK,
                         IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
    l1e[1] = entry;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_PTE_ADDR_LOW_MASK,
                         IOMMU_PTE_ADDR_LOW_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_PAGING_MODE_LEVEL_0, entry,
                         IOMMU_PTE_NEXT_LEVEL_MASK,
                         IOMMU_PTE_NEXT_LEVEL_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_PTE_PRESENT_MASK,
                         IOMMU_PTE_PRESENT_SHIFT, &entry);
    l1e[0] = entry;

    unmap_domain_page(l1_table);
}

static void amd_iommu_set_page_directory_entry(u32 *pde,
                                               u64 next_ptr, u8 next_level)
{
    u64 addr_lo, addr_hi;
    u32 entry;

    addr_lo = next_ptr & DMA_32BIT_MASK;
    addr_hi = next_ptr >> 32;

    /* enable read/write permissions, which will be enforced at the PTE */
    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_PDE_ADDR_HIGH_MASK,
                         IOMMU_PDE_ADDR_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_PDE_IO_WRITE_PERMISSION_MASK,
                         IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_PDE_IO_READ_PERMISSION_MASK,
                         IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry);
    pde[1] = entry;

    /* mark next level as 'present' */
    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_PDE_ADDR_LOW_MASK,
                         IOMMU_PDE_ADDR_LOW_SHIFT, &entry);
    set_field_in_reg_u32(next_level, entry,
                         IOMMU_PDE_NEXT_LEVEL_MASK,
                         IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_PDE_PRESENT_MASK,
                         IOMMU_PDE_PRESENT_SHIFT, &entry);
    pde[0] = entry;
}

void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
                                   u16 domain_id, u8 sys_mgt, u8 dev_ex,
                                   u8 paging_mode)
{
    u64 addr_hi, addr_lo;
    u32 entry;

    dte[7] = dte[6] = 0;

    addr_lo = intremap_ptr & DMA_32BIT_MASK;
    addr_hi = intremap_ptr >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK,
                         IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK,
                         IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK,
                         IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK,
                         IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT, &entry);
    /* fixed and arbitrated interrupts remapped */
    set_field_in_reg_u32(2, entry,
                         IOMMU_DEV_TABLE_INT_CONTROL_MASK,
                         IOMMU_DEV_TABLE_INT_CONTROL_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_LINT0_ENABLE_MASK,
                         IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_LINT1_ENABLE_MASK,
                         IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT, &entry);
    dte[5] = entry;

    set_field_in_reg_u32((u32)addr_lo >> 6, 0,
                         IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK,
                         IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT, &entry);
    /* 2048 entries */
    set_field_in_reg_u32(0xB, entry,
                         IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK,
                         IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_INT_VALID_MASK,
                         IOMMU_DEV_TABLE_INT_VALID_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK,
                         IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT, &entry);
    dte[4] = entry;

    set_field_in_reg_u32(sys_mgt, 0,
                         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
                         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
    set_field_in_reg_u32(dev_ex, entry,
                         IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK,
                         IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT, &entry);
    dte[3] = entry;

    set_field_in_reg_u32(domain_id, 0,
                         IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
                         IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
    dte[2] = entry;

    addr_lo = root_ptr & DMA_32BIT_MASK;
    addr_hi = root_ptr >> 32;
    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
                         IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
                         IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
    dte[1] = entry;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
                         IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
    set_field_in_reg_u32(paging_mode, entry,
                         IOMMU_DEV_TABLE_PAGING_MODE_MASK,
                         IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
                         IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_DEV_TABLE_VALID_MASK,
                         IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
    dte[0] = entry;
}

u64 amd_iommu_get_next_table_from_pte(u32 *entry)
{
    u64 addr_lo, addr_hi, ptr;

    addr_lo = get_field_from_reg_u32(
        entry[0],
        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);

    addr_hi = get_field_from_reg_u32(
        entry[1],
        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);

    ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
    return ptr;
}

static int amd_iommu_is_pte_present(u32 *entry)
{
    return (get_field_from_reg_u32(entry[0],
                                   IOMMU_PDE_PRESENT_MASK,
                                   IOMMU_PDE_PRESENT_SHIFT));
}

void invalidate_dev_table_entry(struct amd_iommu *iommu,
                                u16 device_id)
{
    u32 cmd[4], entry;

    cmd[3] = cmd[2] = 0;
    set_field_in_reg_u32(device_id, 0,
                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK,
                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry);
    cmd[0] = entry;

    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;

    send_iommu_command(iommu, cmd);
}

int amd_iommu_is_dte_page_translation_valid(u32 *entry)
{
    return (get_field_from_reg_u32(entry[0],
                                   IOMMU_DEV_TABLE_VALID_MASK,
                                   IOMMU_DEV_TABLE_VALID_SHIFT) &&
            get_field_from_reg_u32(entry[0],
                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
}

/*
 * Walk the IO page table down to level 1 for io_pfn, allocating missing
 * intermediate tables along the way; returns the machine address of the
 * level-1 table covering io_pfn, or 0 on failure.
 */
static u64 iommu_l2e_from_pfn(struct page_info *table, int level,
                              unsigned long io_pfn)
{
    unsigned long offset;
    void *pde = NULL;
    void *table_vaddr;
    u64 next_table_maddr = 0;

    BUG_ON( table == NULL || level == 0 );

    while ( level > 1 )
    {
        offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
                             (level - IOMMU_PAGING_MODE_LEVEL_1)));
        offset &= ~PTE_PER_TABLE_MASK;

        table_vaddr = map_domain_page(page_to_mfn(table));
        pde = table_vaddr + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);

        if ( !amd_iommu_is_pte_present(pde) )
        {
            if ( next_table_maddr == 0 )
            {
                table = alloc_amd_iommu_pgtable();
                if ( table == NULL )
                    return 0;
                next_table_maddr = page_to_maddr(table);
                amd_iommu_set_page_directory_entry(
                    (u32 *)pde, next_table_maddr, level - 1);
            }
            else /* should never reach here */
                return 0;
        }

        unmap_domain_page(table_vaddr);
        table = maddr_to_page(next_table_maddr);
        level--;
    }

    return next_table_maddr;
}

int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    u64 iommu_l2e;
    unsigned long flags;
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    int iw = IOMMU_IO_WRITE_ENABLED;
    int ir = IOMMU_IO_READ_ENABLED;

    BUG_ON( !hd->root_table );

    spin_lock_irqsave(&hd->mapping_lock, flags);

    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
        goto out;

    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
    if ( iommu_l2e == 0 )
    {
        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return -EFAULT;
    }
    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);

 out:
    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    return 0;
}

int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
    u64 iommu_l2e;
    unsigned long flags;
    struct amd_iommu *iommu;
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    BUG_ON( !hd->root_table );

    spin_lock_irqsave(&hd->mapping_lock, flags);

    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
    {
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return 0;
    }

    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);

    if ( iommu_l2e == 0 )
    {
        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return -EFAULT;
    }

    /* mark PTE as 'page not present' */
    clear_iommu_l1e_present(iommu_l2e, gfn);
    spin_unlock_irqrestore(&hd->mapping_lock, flags);

    /* send INVALIDATE_IOMMU_PAGES command */
    for_each_amd_iommu ( iommu )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        invalidate_iommu_page(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id);
        flush_command_buffer(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }

    return 0;
}

int amd_iommu_reserve_domain_unity_map(
    struct domain *domain,
    unsigned long phys_addr,
    unsigned long size, int iw, int ir)
{
    u64 iommu_l2e;
    unsigned long flags, npages, i;
    struct hvm_iommu *hd = domain_hvm_iommu(domain);

    npages = region_to_pages(phys_addr, size);

    spin_lock_irqsave(&hd->mapping_lock, flags);
    for ( i = 0; i < npages; ++i )
    {
        iommu_l2e = iommu_l2e_from_pfn(
            hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);

        if ( iommu_l2e == 0 )
        {
            amd_iov_error(
                "Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
            spin_unlock_irqrestore(&hd->mapping_lock, flags);
            return -EFAULT;
        }

        set_iommu_l1e_present(iommu_l2e,
                              (phys_addr >> PAGE_SHIFT), phys_addr, iw, ir);

        phys_addr += PAGE_SIZE;
    }
    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    return 0;
}

int amd_iommu_sync_p2m(struct domain *d)
{
    unsigned long mfn, gfn, flags;
    u64 iommu_l2e;
    struct page_info *page;
    struct hvm_iommu *hd;
    int iw = IOMMU_IO_WRITE_ENABLED;
    int ir = IOMMU_IO_READ_ENABLED;

    if ( !is_hvm_domain(d) )
        return 0;

    hd = domain_hvm_iommu(d);

    spin_lock_irqsave(&hd->mapping_lock, flags);

    if ( hd->p2m_synchronized )
        goto out;

    /* hold page_alloc_lock while traversing the domain's page list */
    spin_lock(&d->page_alloc_lock);

    page_list_for_each ( page, &d->page_list )
    {
        mfn = page_to_mfn(page);
        gfn = get_gpfn_from_mfn(mfn);

        if ( gfn == INVALID_M2P_ENTRY )
            continue;

        iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);

        if ( iommu_l2e == 0 )
        {
            spin_unlock(&d->page_alloc_lock);
            amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
            spin_unlock_irqrestore(&hd->mapping_lock, flags);
            return -EFAULT;
        }

        set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
    }

    spin_unlock(&d->page_alloc_lock);

    hd->p2m_synchronized = 1;

 out:
    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    return 0;
}

void invalidate_all_iommu_pages(struct domain *d)
{
    u32 cmd[4], entry;
    unsigned long flags;
    struct amd_iommu *iommu;
    int domain_id = d->domain_id;
    u64 addr_lo = 0x7FFFFFFFFFFFF000ULL & DMA_32BIT_MASK;
    u64 addr_hi = 0x7FFFFFFFFFFFF000ULL >> 32;

    set_field_in_reg_u32(domain_id, 0,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;

    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
    cmd[2] = entry;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
    cmd[3] = entry;

    cmd[0] = 0;

    for_each_amd_iommu ( iommu )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        send_iommu_command(iommu, cmd);
        flush_command_buffer(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }
}