ia64/xen-unstable
changeset 19738:8dd5c3cae086
x86 hvm: move dirty_vram into struct hvm_domain
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Fri Jun 05 14:04:03 2009 +0100 (2009-06-05) |
parents | 6eff3fe96aff |
children | 4448fae52553 |
files | xen/arch/x86/mm/hap/hap.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/hvm/domain.h xen/include/xen/sched.h |
line diff
1.1 --- a/xen/arch/x86/mm/hap/hap.c Fri Jun 05 09:32:03 2009 +0100 1.2 +++ b/xen/arch/x86/mm/hap/hap.c Fri Jun 05 14:04:03 2009 +0100 1.3 @@ -58,8 +58,9 @@ 1.4 int hap_enable_vram_tracking(struct domain *d) 1.5 { 1.6 int i; 1.7 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 1.8 1.9 - if ( !d->dirty_vram ) 1.10 + if ( !dirty_vram ) 1.11 return -EINVAL; 1.12 1.13 /* turn on PG_log_dirty bit in paging mode */ 1.14 @@ -68,7 +69,7 @@ int hap_enable_vram_tracking(struct doma 1.15 hap_unlock(d); 1.16 1.17 /* set l1e entries of P2M table to be read-only. */ 1.18 - for (i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++) 1.19 + for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) 1.20 p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty); 1.21 1.22 flush_tlb_mask(&d->domain_dirty_cpumask); 1.23 @@ -78,8 +79,9 @@ int hap_enable_vram_tracking(struct doma 1.24 int hap_disable_vram_tracking(struct domain *d) 1.25 { 1.26 int i; 1.27 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 1.28 1.29 - if ( !d->dirty_vram ) 1.30 + if ( !dirty_vram ) 1.31 return -EINVAL; 1.32 1.33 hap_lock(d); 1.34 @@ -87,7 +89,7 @@ int hap_disable_vram_tracking(struct dom 1.35 hap_unlock(d); 1.36 1.37 /* set l1e entries of P2M table with normal mode */ 1.38 - for (i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++) 1.39 + for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) 1.40 p2m_change_type(d, i, p2m_ram_logdirty, p2m_ram_rw); 1.41 1.42 flush_tlb_mask(&d->domain_dirty_cpumask); 1.43 @@ -97,12 +99,13 @@ int hap_disable_vram_tracking(struct dom 1.44 void hap_clean_vram_tracking(struct domain *d) 1.45 { 1.46 int i; 1.47 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 1.48 1.49 - if ( !d->dirty_vram ) 1.50 + if ( !dirty_vram ) 1.51 return; 1.52 1.53 /* set l1e entries of P2M table to be read-only. 
*/ 1.54 - for (i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++) 1.55 + for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) 1.56 p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty); 1.57 1.58 flush_tlb_mask(&d->domain_dirty_cpumask); 1.59 @@ -121,30 +124,32 @@ int hap_track_dirty_vram(struct domain * 1.60 XEN_GUEST_HANDLE_64(uint8) dirty_bitmap) 1.61 { 1.62 long rc = 0; 1.63 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 1.64 1.65 if ( nr ) 1.66 { 1.67 - if ( paging_mode_log_dirty(d) && d->dirty_vram ) 1.68 + if ( paging_mode_log_dirty(d) && dirty_vram ) 1.69 { 1.70 - if ( begin_pfn != d->dirty_vram->begin_pfn || 1.71 - begin_pfn + nr != d->dirty_vram->end_pfn ) 1.72 + if ( begin_pfn != dirty_vram->begin_pfn || 1.73 + begin_pfn + nr != dirty_vram->end_pfn ) 1.74 { 1.75 paging_log_dirty_disable(d); 1.76 - d->dirty_vram->begin_pfn = begin_pfn; 1.77 - d->dirty_vram->end_pfn = begin_pfn + nr; 1.78 + dirty_vram->begin_pfn = begin_pfn; 1.79 + dirty_vram->end_pfn = begin_pfn + nr; 1.80 rc = paging_log_dirty_enable(d); 1.81 if (rc != 0) 1.82 goto param_fail; 1.83 } 1.84 } 1.85 - else if ( !paging_mode_log_dirty(d) && !d->dirty_vram ) 1.86 + else if ( !paging_mode_log_dirty(d) && !dirty_vram ) 1.87 { 1.88 rc = -ENOMEM; 1.89 - if ( (d->dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL ) 1.90 + if ( (dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL ) 1.91 goto param_fail; 1.92 1.93 - d->dirty_vram->begin_pfn = begin_pfn; 1.94 - d->dirty_vram->end_pfn = begin_pfn + nr; 1.95 + dirty_vram->begin_pfn = begin_pfn; 1.96 + dirty_vram->end_pfn = begin_pfn + nr; 1.97 + d->arch.hvm_domain.dirty_vram = dirty_vram; 1.98 hap_vram_tracking_init(d); 1.99 rc = paging_log_dirty_enable(d); 1.100 if (rc != 0) 1.101 @@ -152,7 +157,7 @@ int hap_track_dirty_vram(struct domain * 1.102 } 1.103 else 1.104 { 1.105 - if ( !paging_mode_log_dirty(d) && d->dirty_vram ) 1.106 + if ( !paging_mode_log_dirty(d) && dirty_vram ) 1.107 rc = -EINVAL; 1.108 else 1.109 
rc = -ENODATA; 1.110 @@ -163,10 +168,10 @@ int hap_track_dirty_vram(struct domain * 1.111 } 1.112 else 1.113 { 1.114 - if ( paging_mode_log_dirty(d) && d->dirty_vram ) { 1.115 + if ( paging_mode_log_dirty(d) && dirty_vram ) { 1.116 rc = paging_log_dirty_disable(d); 1.117 - xfree(d->dirty_vram); 1.118 - d->dirty_vram = NULL; 1.119 + xfree(dirty_vram); 1.120 + dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; 1.121 } else 1.122 rc = 0; 1.123 } 1.124 @@ -174,10 +179,10 @@ int hap_track_dirty_vram(struct domain * 1.125 return rc; 1.126 1.127 param_fail: 1.128 - if ( d->dirty_vram ) 1.129 + if ( dirty_vram ) 1.130 { 1.131 - xfree(d->dirty_vram); 1.132 - d->dirty_vram = NULL; 1.133 + xfree(dirty_vram); 1.134 + dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; 1.135 } 1.136 return rc; 1.137 } 1.138 @@ -220,11 +225,12 @@ void hap_clean_dirty_bitmap(struct domai 1.139 1.140 void hap_logdirty_init(struct domain *d) 1.141 { 1.142 - if ( paging_mode_log_dirty(d) && d->dirty_vram ) 1.143 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 1.144 + if ( paging_mode_log_dirty(d) && dirty_vram ) 1.145 { 1.146 paging_log_dirty_disable(d); 1.147 - xfree(d->dirty_vram); 1.148 - d->dirty_vram = NULL; 1.149 + xfree(dirty_vram); 1.150 + dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; 1.151 } 1.152 1.153 /* Reinitialize logdirty mechanism */
2.1 --- a/xen/arch/x86/mm/shadow/common.c Fri Jun 05 09:32:03 2009 +0100 2.2 +++ b/xen/arch/x86/mm/shadow/common.c Fri Jun 05 14:04:03 2009 +0100 2.3 @@ -3259,11 +3259,11 @@ void shadow_teardown(struct domain *d) 2.4 * calls now that we've torn down the bitmap */ 2.5 d->arch.paging.mode &= ~PG_log_dirty; 2.6 2.7 - if (d->dirty_vram) { 2.8 - xfree(d->dirty_vram->sl1ma); 2.9 - xfree(d->dirty_vram->dirty_bitmap); 2.10 - xfree(d->dirty_vram); 2.11 - d->dirty_vram = NULL; 2.12 + if (d->arch.hvm_domain.dirty_vram) { 2.13 + xfree(d->arch.hvm_domain.dirty_vram->sl1ma); 2.14 + xfree(d->arch.hvm_domain.dirty_vram->dirty_bitmap); 2.15 + xfree(d->arch.hvm_domain.dirty_vram); 2.16 + d->arch.hvm_domain.dirty_vram = NULL; 2.17 } 2.18 2.19 shadow_unlock(d); 2.20 @@ -3583,6 +3583,7 @@ int shadow_track_dirty_vram(struct domai 2.21 int flush_tlb = 0; 2.22 unsigned long i; 2.23 p2m_type_t t; 2.24 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 2.25 2.26 if (end_pfn < begin_pfn 2.27 || begin_pfn > d->arch.p2m->max_mapped_pfn 2.28 @@ -3591,16 +3592,16 @@ int shadow_track_dirty_vram(struct domai 2.29 2.30 shadow_lock(d); 2.31 2.32 - if ( d->dirty_vram && (!nr || 2.33 - ( begin_pfn != d->dirty_vram->begin_pfn 2.34 - || end_pfn != d->dirty_vram->end_pfn )) ) 2.35 + if ( dirty_vram && (!nr || 2.36 + ( begin_pfn != dirty_vram->begin_pfn 2.37 + || end_pfn != dirty_vram->end_pfn )) ) 2.38 { 2.39 /* Different tracking, tear the previous down. 
*/ 2.40 - gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", d->dirty_vram->begin_pfn, d->dirty_vram->end_pfn); 2.41 - xfree(d->dirty_vram->sl1ma); 2.42 - xfree(d->dirty_vram->dirty_bitmap); 2.43 - xfree(d->dirty_vram); 2.44 - d->dirty_vram = NULL; 2.45 + gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", dirty_vram->begin_pfn, dirty_vram->end_pfn); 2.46 + xfree(dirty_vram->sl1ma); 2.47 + xfree(dirty_vram->dirty_bitmap); 2.48 + xfree(dirty_vram); 2.49 + dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; 2.50 } 2.51 2.52 if ( !nr ) 2.53 @@ -3611,7 +3612,7 @@ int shadow_track_dirty_vram(struct domai 2.54 2.55 /* This should happen seldomly (Video mode change), 2.56 * no need to be careful. */ 2.57 - if ( !d->dirty_vram ) 2.58 + if ( !dirty_vram ) 2.59 { 2.60 /* Just recount from start. */ 2.61 for ( i = begin_pfn; i < end_pfn; i++ ) { 2.62 @@ -3623,29 +3624,30 @@ int shadow_track_dirty_vram(struct domai 2.63 gdprintk(XENLOG_INFO, "tracking VRAM %lx - %lx\n", begin_pfn, end_pfn); 2.64 2.65 rc = -ENOMEM; 2.66 - if ( (d->dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL ) 2.67 + if ( (dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL ) 2.68 goto out; 2.69 - d->dirty_vram->begin_pfn = begin_pfn; 2.70 - d->dirty_vram->end_pfn = end_pfn; 2.71 - 2.72 - if ( (d->dirty_vram->sl1ma = xmalloc_array(paddr_t, nr)) == NULL ) 2.73 + dirty_vram->begin_pfn = begin_pfn; 2.74 + dirty_vram->end_pfn = end_pfn; 2.75 + d->arch.hvm_domain.dirty_vram = dirty_vram; 2.76 + 2.77 + if ( (dirty_vram->sl1ma = xmalloc_array(paddr_t, nr)) == NULL ) 2.78 goto out_dirty_vram; 2.79 - memset(d->dirty_vram->sl1ma, ~0, sizeof(paddr_t) * nr); 2.80 - 2.81 - if ( (d->dirty_vram->dirty_bitmap = xmalloc_array(uint8_t, dirty_size)) == NULL ) 2.82 + memset(dirty_vram->sl1ma, ~0, sizeof(paddr_t) * nr); 2.83 + 2.84 + if ( (dirty_vram->dirty_bitmap = xmalloc_array(uint8_t, dirty_size)) == NULL ) 2.85 goto out_sl1ma; 2.86 - memset(d->dirty_vram->dirty_bitmap, 0, dirty_size); 2.87 - 2.88 - 
d->dirty_vram->last_dirty = NOW(); 2.89 + memset(dirty_vram->dirty_bitmap, 0, dirty_size); 2.90 + 2.91 + dirty_vram->last_dirty = NOW(); 2.92 2.93 /* Tell the caller that this time we could not track dirty bits. */ 2.94 rc = -ENODATA; 2.95 } 2.96 - else if (d->dirty_vram->last_dirty == -1) 2.97 + else if (dirty_vram->last_dirty == -1) 2.98 { 2.99 /* still completely clean, just copy our empty bitmap */ 2.100 rc = -EFAULT; 2.101 - if ( copy_to_guest(dirty_bitmap, d->dirty_vram->dirty_bitmap, dirty_size) == 0 ) 2.102 + if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) 2.103 rc = 0; 2.104 } 2.105 else 2.106 @@ -3660,7 +3662,7 @@ int shadow_track_dirty_vram(struct domai 2.107 mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t); 2.108 struct page_info *page; 2.109 int dirty = 0; 2.110 - paddr_t sl1ma = d->dirty_vram->sl1ma[i]; 2.111 + paddr_t sl1ma = dirty_vram->sl1ma[i]; 2.112 2.113 if (mfn_x(mfn) == INVALID_MFN) 2.114 { 2.115 @@ -3724,8 +3726,8 @@ int shadow_track_dirty_vram(struct domai 2.116 2.117 if ( dirty ) 2.118 { 2.119 - d->dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8); 2.120 - d->dirty_vram->last_dirty = NOW(); 2.121 + dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8); 2.122 + dirty_vram->last_dirty = NOW(); 2.123 } 2.124 } 2.125 2.126 @@ -3735,9 +3737,9 @@ int shadow_track_dirty_vram(struct domai 2.127 #endif 2.128 2.129 rc = -EFAULT; 2.130 - if ( copy_to_guest(dirty_bitmap, d->dirty_vram->dirty_bitmap, dirty_size) == 0 ) { 2.131 - memset(d->dirty_vram->dirty_bitmap, 0, dirty_size); 2.132 - if (d->dirty_vram->last_dirty + SECONDS(2) < NOW()) 2.133 + if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) { 2.134 + memset(dirty_vram->dirty_bitmap, 0, dirty_size); 2.135 + if (dirty_vram->last_dirty + SECONDS(2) < NOW()) 2.136 { 2.137 /* was clean for more than two seconds, try to disable guest 2.138 * write access */ 2.139 @@ -3746,7 +3748,7 @@ int shadow_track_dirty_vram(struct domai 2.140 if (mfn_x(mfn) != 
INVALID_MFN) 2.141 flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0); 2.142 } 2.143 - d->dirty_vram->last_dirty = -1; 2.144 + dirty_vram->last_dirty = -1; 2.145 } 2.146 rc = 0; 2.147 } 2.148 @@ -3756,10 +3758,10 @@ int shadow_track_dirty_vram(struct domai 2.149 goto out; 2.150 2.151 out_sl1ma: 2.152 - xfree(d->dirty_vram->sl1ma); 2.153 + xfree(dirty_vram->sl1ma); 2.154 out_dirty_vram: 2.155 - xfree(d->dirty_vram); 2.156 - d->dirty_vram = NULL; 2.157 + xfree(dirty_vram); 2.158 + dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; 2.159 2.160 out: 2.161 shadow_unlock(d);
3.1 --- a/xen/arch/x86/mm/shadow/multi.c Fri Jun 05 09:32:03 2009 +0100 3.2 +++ b/xen/arch/x86/mm/shadow/multi.c Fri Jun 05 14:04:03 2009 +0100 3.3 @@ -475,6 +475,7 @@ static always_inline void 3.4 guest_l1e_t guest_entry = { guest_intpte }; 3.5 shadow_l1e_t *sp = shadow_entry_ptr; 3.6 struct domain *d = v->domain; 3.7 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 3.8 gfn_t target_gfn = guest_l1e_get_gfn(guest_entry); 3.9 u32 pass_thru_flags; 3.10 u32 gflags, sflags; 3.11 @@ -615,13 +616,13 @@ static always_inline void 3.12 } 3.13 } 3.14 3.15 - if ( unlikely((level == 1) && d->dirty_vram 3.16 - && d->dirty_vram->last_dirty == -1 3.17 - && gfn_x(target_gfn) >= d->dirty_vram->begin_pfn 3.18 - && gfn_x(target_gfn) < d->dirty_vram->end_pfn) ) 3.19 + if ( unlikely((level == 1) && dirty_vram 3.20 + && dirty_vram->last_dirty == -1 3.21 + && gfn_x(target_gfn) >= dirty_vram->begin_pfn 3.22 + && gfn_x(target_gfn) < dirty_vram->end_pfn) ) 3.23 { 3.24 if ( ft & FETCH_TYPE_WRITE ) 3.25 - d->dirty_vram->last_dirty = NOW(); 3.26 + dirty_vram->last_dirty = NOW(); 3.27 else 3.28 sflags &= ~_PAGE_RW; 3.29 } 3.30 @@ -1042,22 +1043,23 @@ static inline void shadow_vram_get_l1e(s 3.31 mfn_t mfn = shadow_l1e_get_mfn(new_sl1e); 3.32 int flags = shadow_l1e_get_flags(new_sl1e); 3.33 unsigned long gfn; 3.34 - 3.35 - if ( !d->dirty_vram /* tracking disabled? */ 3.36 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 3.37 + 3.38 + if ( !dirty_vram /* tracking disabled? */ 3.39 || !(flags & _PAGE_RW) /* read-only mapping? 
*/ 3.40 || !mfn_valid(mfn) ) /* mfn can be invalid in mmio_direct */ 3.41 return; 3.42 3.43 gfn = mfn_to_gfn(d, mfn); 3.44 3.45 - if ( (gfn >= d->dirty_vram->begin_pfn) && (gfn < d->dirty_vram->end_pfn) ) 3.46 + if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) ) 3.47 { 3.48 - unsigned long i = gfn - d->dirty_vram->begin_pfn; 3.49 + unsigned long i = gfn - dirty_vram->begin_pfn; 3.50 struct page_info *page = mfn_to_page(mfn); 3.51 3.52 if ( (page->u.inuse.type_info & PGT_count_mask) == 1 ) 3.53 /* Initial guest reference, record it */ 3.54 - d->dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn)) 3.55 + dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn)) 3.56 | ((unsigned long)sl1e & ~PAGE_MASK); 3.57 } 3.58 } 3.59 @@ -1070,17 +1072,18 @@ static inline void shadow_vram_put_l1e(s 3.60 mfn_t mfn = shadow_l1e_get_mfn(old_sl1e); 3.61 int flags = shadow_l1e_get_flags(old_sl1e); 3.62 unsigned long gfn; 3.63 - 3.64 - if ( !d->dirty_vram /* tracking disabled? */ 3.65 + struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; 3.66 + 3.67 + if ( !dirty_vram /* tracking disabled? */ 3.68 || !(flags & _PAGE_RW) /* read-only mapping? 
*/ 3.69 || !mfn_valid(mfn) ) /* mfn can be invalid in mmio_direct */ 3.70 return; 3.71 3.72 gfn = mfn_to_gfn(d, mfn); 3.73 3.74 - if ( (gfn >= d->dirty_vram->begin_pfn) && (gfn < d->dirty_vram->end_pfn) ) 3.75 + if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) ) 3.76 { 3.77 - unsigned long i = gfn - d->dirty_vram->begin_pfn; 3.78 + unsigned long i = gfn - dirty_vram->begin_pfn; 3.79 struct page_info *page = mfn_to_page(mfn); 3.80 int dirty = 0; 3.81 paddr_t sl1ma = pfn_to_paddr(mfn_x(sl1mfn)) 3.82 @@ -1089,14 +1092,14 @@ static inline void shadow_vram_put_l1e(s 3.83 if ( (page->u.inuse.type_info & PGT_count_mask) == 1 ) 3.84 { 3.85 /* Last reference */ 3.86 - if ( d->dirty_vram->sl1ma[i] == INVALID_PADDR ) { 3.87 + if ( dirty_vram->sl1ma[i] == INVALID_PADDR ) { 3.88 /* We didn't know it was that one, let's say it is dirty */ 3.89 dirty = 1; 3.90 } 3.91 else 3.92 { 3.93 - ASSERT(d->dirty_vram->sl1ma[i] == sl1ma); 3.94 - d->dirty_vram->sl1ma[i] = INVALID_PADDR; 3.95 + ASSERT(dirty_vram->sl1ma[i] == sl1ma); 3.96 + dirty_vram->sl1ma[i] = INVALID_PADDR; 3.97 if ( flags & _PAGE_DIRTY ) 3.98 dirty = 1; 3.99 } 3.100 @@ -1106,10 +1109,10 @@ static inline void shadow_vram_put_l1e(s 3.101 /* We had more than one reference, just consider the page dirty. */ 3.102 dirty = 1; 3.103 /* Check that it's not the one we recorded. */ 3.104 - if ( d->dirty_vram->sl1ma[i] == sl1ma ) 3.105 + if ( dirty_vram->sl1ma[i] == sl1ma ) 3.106 { 3.107 /* Too bad, we remembered the wrong one... */ 3.108 - d->dirty_vram->sl1ma[i] = INVALID_PADDR; 3.109 + dirty_vram->sl1ma[i] = INVALID_PADDR; 3.110 } 3.111 else 3.112 { 3.113 @@ -1119,8 +1122,8 @@ static inline void shadow_vram_put_l1e(s 3.114 } 3.115 if ( dirty ) 3.116 { 3.117 - d->dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8); 3.118 - d->dirty_vram->last_dirty = NOW(); 3.119 + dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8); 3.120 + dirty_vram->last_dirty = NOW(); 3.121 } 3.122 } 3.123 }
4.1 --- a/xen/include/asm-x86/hvm/domain.h Fri Jun 05 09:32:03 2009 +0100 4.2 +++ b/xen/include/asm-x86/hvm/domain.h Fri Jun 05 14:04:03 2009 +0100 4.3 @@ -66,6 +66,9 @@ struct hvm_domain { 4.4 /* Memory ranges with pinned cache attributes. */ 4.5 struct list_head pinned_cacheattr_ranges; 4.6 4.7 + /* VRAM dirty support. */ 4.8 + struct sh_dirty_vram *dirty_vram; 4.9 + 4.10 /* If one of vcpus of this domain is in no_fill_mode or 4.11 * mtrr/pat between vcpus is not the same, set is_in_uc_mode 4.12 */
5.1 --- a/xen/include/xen/sched.h Fri Jun 05 09:32:03 2009 +0100 5.2 +++ b/xen/include/xen/sched.h Fri Jun 05 14:04:03 2009 +0100 5.3 @@ -268,9 +268,6 @@ struct domain 5.4 */ 5.5 spinlock_t hypercall_deadlock_mutex; 5.6 5.7 - /* VRAM dirty support. */ 5.8 - struct sh_dirty_vram *dirty_vram; 5.9 - 5.10 /* transcendent memory, auto-allocated on first tmem op by each domain */ 5.11 void *tmem; 5.12 };