ia64/xen-unstable
changeset 17727:c684cf331f94
Handle dynamic IOMMU map/unmap for guests
Perform IOMMU map/unmap when (a) frame type changes, (b) memory
reservation changes, and (c) a grant reference is newly mapped or
completely unmapped from a domain.
Signed-off-by: Espen Skoglund <espen.skoglund@netronome.com>
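All three cases share one edge-triggered idiom: an IOMMU mapping is created when a frame first becomes writable from the guest's point of view and torn down when its last writable reference disappears, while intermediate reference-count changes leave the IOMMU untouched. Below is a minimal standalone sketch of that idiom; the counter and the map/unmap stubs are illustrative stand-ins, not the hypervisor's actual interfaces.

#include <stdio.h>

/* Illustrative stand-ins for Xen's iommu_map_page()/iommu_unmap_page(). */
static void map_frame(unsigned long mfn)   { printf("map   %#lx\n", mfn); }
static void unmap_frame(unsigned long mfn) { printf("unmap %#lx\n", mfn); }

/*
 * Edge-triggered update: act only on the 0 -> nonzero and nonzero -> 0
 * transitions of the writable reference count.
 */
static void writable_refs_changed(unsigned long mfn,
                                  unsigned int old_refs,
                                  unsigned int new_refs)
{
    if ( old_refs == 0 && new_refs != 0 )
        map_frame(mfn);                  /* first writable reference */
    else if ( old_refs != 0 && new_refs == 0 )
        unmap_frame(mfn);                /* last writable reference gone */
}

int main(void)
{
    writable_refs_changed(0x1000, 0, 1); /* maps */
    writable_refs_changed(0x1000, 1, 2); /* no-op */
    writable_refs_changed(0x1000, 2, 1); /* no-op */
    writable_refs_changed(0x1000, 1, 0); /* unmaps */
    return 0;
}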
author    Keir Fraser <keir.fraser@citrix.com>
date      Sat May 24 09:42:02 2008 +0100 (2008-05-24)
parents   62f1c837057f
children  28083093cc5d
files     xen/arch/x86/mm.c xen/arch/x86/mm/p2m.c xen/common/grant_table.c xen/common/memory.c
line diff
--- a/xen/arch/x86/mm.c	Sat May 24 09:37:35 2008 +0100
+++ b/xen/arch/x86/mm.c	Sat May 24 09:42:02 2008 +0100
@@ -1939,6 +1939,20 @@ int get_page_type(struct page_info *page
     }
     while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 
+    if ( unlikely((x & PGT_type_mask) != type) )
+    {
+        /* Special pages should not be accessible from devices. */
+        struct domain *d = page_get_owner(page);
+        if ( d && unlikely(need_iommu(d)) )
+        {
+            if ( (x & PGT_type_mask) == PGT_writable_page )
+                iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+            else if ( type == PGT_writable_page )
+                iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+                               page_to_mfn(page));
+        }
+    }
+
     if ( unlikely(!(nx & PGT_validated)) )
     {
         /* Try to validate page type; drop the new reference on failure. */
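The hook added to get_page_type() above fires only when the page's type bits actually change (the old type in x differs from the requested type): leaving PGT_writable_page revokes device access, entering it grants access, and any other transition is ignored. A standalone sketch of just that decision, with simplified type constants standing in for Xen's PGT_* bit encoding (the real check masks type_info with PGT_type_mask):

#include <assert.h>

enum { TYPE_NONE, TYPE_WRITABLE, TYPE_PAGE_TABLE };  /* simplified PGT_* */

/* Returns +1 to map, -1 to unmap, 0 to leave the IOMMU alone. */
static int iommu_action(int old_type, int new_type)
{
    if ( old_type == new_type )
        return 0;                  /* no type transition, nothing to do */
    if ( old_type == TYPE_WRITABLE )
        return -1;                 /* frame stops being plain writable RAM */
    if ( new_type == TYPE_WRITABLE )
        return +1;                 /* frame becomes plain writable RAM */
    return 0;                      /* e.g. page table -> descriptor table */
}

int main(void)
{
    assert(iommu_action(TYPE_WRITABLE, TYPE_PAGE_TABLE) == -1);
    assert(iommu_action(TYPE_PAGE_TABLE, TYPE_WRITABLE) == +1);
    assert(iommu_action(TYPE_WRITABLE, TYPE_WRITABLE)   ==  0);
    return 0;
}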
--- a/xen/arch/x86/mm/p2m.c	Sat May 24 09:37:35 2008 +0100
+++ b/xen/arch/x86/mm/p2m.c	Sat May 24 09:42:02 2008 +0100
@@ -325,7 +325,7 @@ p2m_set_entry(struct domain *d, unsigned
     if ( mfn_valid(mfn) && (gfn > d->arch.p2m->max_mapped_pfn) )
         d->arch.p2m->max_mapped_pfn = gfn;
 
-    if ( iommu_enabled && is_hvm_domain(d) )
+    if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) )
     {
         if ( p2mt == p2m_ram_rw )
             for ( i = 0; i < (1UL << page_order); i++ )
@@ -868,7 +868,12 @@ p2m_remove_page(struct domain *d, unsign
     unsigned long i;
 
     if ( !paging_mode_translate(d) )
+    {
+        if ( need_iommu(d) )
+            for ( i = 0; i < (1 << page_order); i++ )
+                iommu_unmap_page(d, mfn + i);
         return;
+    }
 
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
 
@@ -899,7 +904,19 @@ guest_physmap_add_entry(struct domain *d
     int rc = 0;
 
     if ( !paging_mode_translate(d) )
-        return -EINVAL;
+    {
+        if ( need_iommu(d) && t == p2m_ram_rw )
+        {
+            for ( i = 0; i < (1 << page_order); i++ )
+                if ( (rc = iommu_map_page(d, mfn + i, mfn + i)) != 0 )
+                {
+                    while ( i-- > 0 )
+                        iommu_unmap_page(d, mfn + i);
+                    return rc;
+                }
+        }
+        return 0;
+    }
 
 #if CONFIG_PAGING_LEVELS == 3
     /*
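For non-translated (PV) domains, guest_physmap_add_entry() now installs 1:1 IOMMU mappings for the whole 2^page_order extent and, if any single map fails, unwinds the partial work before propagating the error, so no stale mappings survive a failed call. The unwind idiom in isolation, using a stub mapper that can be made to fail at a chosen index (purely illustrative; not the hypervisor's interface):

#include <assert.h>
#include <errno.h>

static int fail_at = -1;               /* index at which the stub mapper fails */
static int mapped[8];

static int stub_map(unsigned long i)
{
    if ( (int)i == fail_at )
        return -EIO;
    mapped[i] = 1;
    return 0;
}

static void stub_unmap(unsigned long i)
{
    mapped[i] = 0;
}

/* Map 2^order frames; on any failure, unmap everything mapped so far. */
static int map_extent(unsigned int order)
{
    unsigned long i;
    int rc;

    for ( i = 0; i < (1UL << order); i++ )
        if ( (rc = stub_map(i)) != 0 )
        {
            while ( i-- > 0 )
                stub_unmap(i);
            return rc;
        }
    return 0;
}

int main(void)
{
    int i;

    fail_at = 2;
    assert(map_extent(3) == -EIO);
    for ( i = 0; i < 8; i++ )
        assert(!mapped[i]);            /* rollback left no stale mappings */
    return 0;
}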
--- a/xen/common/grant_table.c	Sat May 24 09:37:35 2008 +0100
+++ b/xen/common/grant_table.c	Sat May 24 09:42:02 2008 +0100
@@ -196,8 +196,9 @@ static void
     struct domain *ld, *rd;
     struct vcpu *led;
     int handle;
-    unsigned long frame = 0;
+    unsigned long frame = 0, nr_gets = 0;
     int rc = GNTST_okay;
+    u32 old_pin;
     unsigned int cache_flags;
     struct active_grant_entry *act;
     struct grant_mapping *mt;
@@ -318,6 +319,7 @@ static void
         }
     }
 
+    old_pin = act->pin;
     if ( op->flags & GNTMAP_device_map )
         act->pin += (op->flags & GNTMAP_readonly) ?
                     GNTPIN_devr_inc : GNTPIN_devw_inc;
@@ -361,20 +363,17 @@ static void
         rc = GNTST_general_error;
         goto undo_out;
     }
-
+
+    nr_gets++;
     if ( op->flags & GNTMAP_host_map )
     {
         rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
         if ( rc != GNTST_okay )
-        {
-            if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
-                put_page_type(mfn_to_page(frame));
-            put_page(mfn_to_page(frame));
             goto undo_out;
-        }
 
         if ( op->flags & GNTMAP_device_map )
         {
+            nr_gets++;
             (void)get_page(mfn_to_page(frame), rd);
             if ( !(op->flags & GNTMAP_readonly) )
                 get_page_type(mfn_to_page(frame), PGT_writable_page);
@@ -382,6 +381,17 @@ static void
         }
     }
 
+    if ( need_iommu(ld) &&
+         !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
+         (act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+    {
+        if ( iommu_map_page(ld, mfn_to_gmfn(ld, frame), frame) )
+        {
+            rc = GNTST_general_error;
+            goto undo_out;
+        }
+    }
+
     TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
 
     mt = &maptrack_entry(ld->grant_table, handle);
@@ -397,6 +407,19 @@ static void
     return;
 
  undo_out:
+    if ( nr_gets > 1 )
+    {
+        if ( !(op->flags & GNTMAP_readonly) )
+            put_page_type(mfn_to_page(frame));
+        put_page(mfn_to_page(frame));
+    }
+    if ( nr_gets > 0 )
+    {
+        if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
+            put_page_type(mfn_to_page(frame));
+        put_page(mfn_to_page(frame));
+    }
+
     spin_lock(&rd->grant_table->lock);
 
     act = &active_entry(rd->grant_table, op->ref);
@@ -451,6 +474,7 @@ static void
     struct active_grant_entry *act;
     grant_entry_t *sha;
     s16 rc = 0;
+    u32 old_pin;
 
     ld = current->domain;
 
@@ -497,6 +521,7 @@ static void
 
     act = &active_entry(rd->grant_table, op->map->ref);
     sha = &shared_entry(rd->grant_table, op->map->ref);
+    old_pin = act->pin;
 
     if ( op->frame == 0 )
     {
@@ -534,6 +559,17 @@ static void
         act->pin -= GNTPIN_hstw_inc;
     }
 
+    if ( need_iommu(ld) &&
+         (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
+         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+    {
+        if ( iommu_unmap_page(ld, mfn_to_gmfn(ld, op->frame)) )
+        {
+            rc = GNTST_general_error;
+            goto unmap_out;
+        }
+    }
+
     /* If just unmapped a writable mapping, mark as dirtied */
     if ( !(op->flags & GNTMAP_readonly) )
         gnttab_mark_dirty(rd, op->frame);
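In the grant-table paths the edge is detected by snapshotting act->pin into old_pin before the pin counters are adjusted: act->pin packs the host/device read/write pin counters into one word, so "first writable pin" and "last writable pin gone" reduce to masking the before and after values. A standalone sketch of that test with illustrative field widths (Xen's real GNTPIN_* layout differs in its constants; only the before/after mask idiom is the point):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative layout: 8 bits per counter, write counters in the high half. */
#define HSTW_INC   (1u << 16)
#define DEVW_INC   (1u << 24)
#define HSTW_MASK  (0xffu << 16)
#define DEVW_MASK  (0xffu << 24)
#define W_MASK     (HSTW_MASK | DEVW_MASK)

static bool first_writable_pin(uint32_t old_pin, uint32_t new_pin)
{
    return !(old_pin & W_MASK) && (new_pin & W_MASK);
}

static bool last_writable_unpin(uint32_t old_pin, uint32_t new_pin)
{
    return (old_pin & W_MASK) && !(new_pin & W_MASK);
}

int main(void)
{
    uint32_t pin = 0;

    /* First writable (host) pin: this is where the IOMMU map happens. */
    assert(first_writable_pin(pin, pin + HSTW_INC));
    pin += HSTW_INC;

    /* A second writable (device) pin changes nothing IOMMU-wise. */
    assert(!first_writable_pin(pin, pin + DEVW_INC));
    pin += DEVW_INC;

    /* Dropping one of two writable pins: the mapping must stay. */
    assert(!last_writable_unpin(pin, pin - DEVW_INC));
    pin -= DEVW_INC;

    /* Dropping the last writable pin: this is where the unmap happens. */
    assert(last_writable_unpin(pin, pin - HSTW_INC));
    return 0;
}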
--- a/xen/common/memory.c	Sat May 24 09:37:35 2008 +0100
+++ b/xen/common/memory.c	Sat May 24 09:42:02 2008 +0100
@@ -124,12 +124,9 @@ static void populate_physmap(struct memo
         }
 
         mfn = page_to_mfn(page);
+        guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
 
-        if ( unlikely(paging_mode_translate(d)) )
-        {
-            guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
-        }
-        else
+        if ( !paging_mode_translate(d) )
         {
             for ( j = 0; j < (1 << a->extent_order); j++ )
                 set_gpfn_from_mfn(mfn + j, gpfn + j);
@@ -436,11 +433,9 @@ static long memory_exchange(XEN_GUEST_HA
                 &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);
 
             mfn = page_to_mfn(page);
-            if ( unlikely(paging_mode_translate(d)) )
-            {
-                guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
-            }
-            else
+            guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
+
+            if ( !paging_mode_translate(d) )
             {
                 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
                     set_gpfn_from_mfn(mfn + k, gpfn + k);
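The memory.c changes carry the reservation-change case: populate_physmap() and memory_exchange() now call guest_physmap_add_page() for every domain rather than only translated ones, so non-translated (PV) domains fall through to the reworked guest_physmap_add_entry() path above and pick up their 1:1 IOMMU mappings, while the machine-to-phys table update (set_gpfn_from_mfn) remains in the PV-only branch.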