ia64/xen-unstable

annotate xen/include/asm-x86/shadow.h @ 3813:4b4a77f35103

bitkeeper revision 1.1159.260.2 (420e0856crqXXEkQoCUddas8u5ksXA)

Rename the check_pagetable() function to _check_pagetable() to make it
easier to enable selectively during debugging. The check_pagetable macro
remains, but it is now defined both when SHADOW_DEBUG is enabled and
when it is disabled.
author mafetter@fleming.research
date Sat Feb 12 13:44:54 2005 +0000 (2005-02-12)
parents 4644bea63898
children 58be428f51a8
rev   line source
djm@1648 1 /* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- */
djm@1648 2
djm@1648 3 #ifndef _XEN_SHADOW_H
djm@1648 4 #define _XEN_SHADOW_H
djm@1648 5
djm@1648 6 #include <xen/config.h>
djm@1648 7 #include <xen/types.h>
djm@1648 8 #include <xen/perfc.h>
djm@1648 9 #include <asm/processor.h>
djm@1648 10
djm@1648 11 /* Shadow PT flag bits in pfn_info */
djm@1648 12 #define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */
djm@1648 13 #define PSH_pfn_mask ((1<<21)-1)
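/*
 * Editorial sketch (not in the original source): a shadow-status word
 * packs a shadow pfn into the low 21 bits and carries flag bits such as
 * PSH_shadowed above them. Variable names here are illustrative only.
 */
#if 0
unsigned long status = PSH_shadowed | spfn;    /* pack flag + shadow pfn */
unsigned long spfn2  = status & PSH_pfn_mask;  /* recover the shadow pfn */
int shadowed = (status & PSH_shadowed) != 0;   /* test the flag bit      */
#endif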
djm@1648 14
djm@1648 15 /* Shadow PT operation mode : shadowmode variable in mm_struct */
djm@1648 16 #define SHM_test (1) /* just run domain on shadow PTs */
djm@1648 17 #define SHM_logdirty (2) /* log pages that are dirtied */
djm@1648 18 #define SHM_translate (3) /* lookup machine pages in translation table */
kaf24@2635 19 #define SHM_cow (4) /* copy on write all dirtied pages */
djm@1648 20
djm@1648 21 #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
kaf24@2635 22 #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
kaf24@2635 23 (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
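/*
 * Editorial sketch (not in the original source): with the shadow linear
 * mapping installed, the shadow L1/L2 entries covering a virtual address
 * 'va' can be read directly, without walking the shadow tables by hand:
 */
#if 0
l1_pgentry_t spte = shadow_linear_pg_table[va >> PAGE_SHIFT];
l2_pgentry_t spde = shadow_linear_l2_table[va >> L2_PAGETABLE_SHIFT];
#endif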
djm@1648 24
kaf24@1749 25 #define shadow_mode(_d) ((_d)->mm.shadow_mode)
kaf24@1749 26 #define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock)
kaf24@2728 27 #define shadow_lock(_m) spin_lock(&(_m)->shadow_lock)
kaf24@2728 28 #define shadow_unlock(_m) spin_unlock(&(_m)->shadow_lock)
kaf24@1749 29
djm@1648 30 extern void shadow_mode_init(void);
kaf24@1749 31 extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
kaf24@1749 32 extern int shadow_fault(unsigned long va, long error_code);
kaf24@2635 33 extern void shadow_l1_normal_pt_update(
kaf24@2635 34 unsigned long pa, unsigned long gpte,
kaf24@2635 35 unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
kaf24@1749 36 extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte);
kaf24@1749 37 extern void unshadow_table(unsigned long gpfn, unsigned int type);
kaf24@1749 38 extern int shadow_mode_enable(struct domain *p, unsigned int mode);
kaf24@1749 39
kaf24@1749 40 extern void __shadow_mode_disable(struct domain *d);
kaf24@1749 41 static inline void shadow_mode_disable(struct domain *d)
kaf24@1749 42 {
kaf24@1749 43 if ( shadow_mode(d) )
kaf24@1749 44 __shadow_mode_disable(d);
kaf24@1749 45 }
kaf24@1749 46
djm@1648 47 extern unsigned long shadow_l2_table(
kaf24@2635 48 struct mm_struct *m, unsigned long gpfn);
djm@1648 49
kaf24@2635 50 #define SHADOW_DEBUG 0
djm@1648 51 #define SHADOW_HASH_DEBUG 0
djm@1648 52
djm@1648 53 struct shadow_status {
kaf24@2635 54 unsigned long pfn; /* Guest pfn. */
kaf24@2635 55 unsigned long spfn_and_flags; /* Shadow pfn plus flags. */
kaf24@2635 56 struct shadow_status *next; /* Pull-to-front list. */
djm@1648 57 };
djm@1648 58
kaf24@2635 59 #define shadow_ht_extra_size 128
kaf24@2635 60 #define shadow_ht_buckets 256
djm@1648 61
kaf24@2059 62 #ifdef VERBOSE
djm@1648 63 #define SH_LOG(_f, _a...) \
djm@1648 64 printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
kaf24@2710 65 current->id , __LINE__ , ## _a )
djm@1648 66 #else
djm@1648 67 #define SH_LOG(_f, _a...)
djm@1648 68 #endif
djm@1648 69
djm@1648 70 #if SHADOW_DEBUG
djm@1648 71 #define SH_VLOG(_f, _a...) \
djm@1648 72 printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
kaf24@2710 73 current->id , __LINE__ , ## _a )
djm@1648 74 #else
djm@1648 75 #define SH_VLOG(_f, _a...)
djm@1648 76 #endif
djm@1648 77
djm@1648 78 #if 0
djm@1648 79 #define SH_VVLOG(_f, _a...) \
djm@1648 80 printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
kaf24@2710 81 current->id , __LINE__ , ## _a )
djm@1648 82 #else
djm@1648 83 #define SH_VVLOG(_f, _a...)
djm@1648 84 #endif
djm@1648 85
djm@1648 86
djm@1648 87 /************************************************************************/
djm@1648 88
kaf24@2635 89 static inline int __mark_dirty(struct mm_struct *m, unsigned int mfn)
djm@1648 90 {
kaf24@2635 91 unsigned long pfn;
kaf24@2635 92 int rc = 0;
djm@1648 93
djm@1648 94 ASSERT(spin_is_locked(&m->shadow_lock));
kaf24@2635 95 ASSERT(m->shadow_dirty_bitmap != NULL);
djm@1648 96
djm@1648 97 pfn = machine_to_phys_mapping[mfn];
djm@1648 98
kaf24@2635 99 /*
kaf24@2635 100 * Values with the MSB set denote MFNs that aren't really part of the
kaf24@2635 101 * domain's pseudo-physical memory map (e.g., the shared info frame).
kaf24@2635 102 * Nothing to do here...
kaf24@2635 103 */
kaf24@2635 104 if ( unlikely(pfn & 0x80000000UL) )
kaf24@2635 105 return rc;
djm@1648 106
kaf24@2635 107 if ( likely(pfn < m->shadow_dirty_bitmap_size) )
djm@1648 108 {
kaf24@2635 109 /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
kaf24@2635 110 if ( !__test_and_set_bit(pfn, m->shadow_dirty_bitmap) )
kaf24@1749 111 {
kaf24@1749 112 m->shadow_dirty_count++;
kaf24@1749 113 rc = 1;
kaf24@1749 114 }
djm@1648 115 }
kaf24@2635 116 #ifndef NDEBUG
kaf24@2635 117 else if ( mfn < max_page )
djm@1648 118 {
kaf24@3041 119 unsigned long *esp;
iap10@2740 120 SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (mm %p)",
kaf24@2635 121 mfn, pfn, m->shadow_dirty_bitmap_size, m );
kaf24@2635 122 SH_LOG("dom=%p caf=%08x taf=%08x\n",
kaf24@2635 123 frame_table[mfn].u.inuse.domain,
kaf24@2635 124 frame_table[mfn].count_info,
kaf24@2635 125 frame_table[mfn].u.inuse.type_info );
kaf24@3041 126 __asm__ __volatile__ ("movl %%esp,%0" : "=r" (esp) : );
kaf24@3041 127 show_trace(esp);
djm@1648 128 }
kaf24@2635 129 #endif
kaf24@1749 130
djm@1648 131 return rc;
djm@1648 132 }
djm@1648 133
djm@1648 134
kaf24@2635 135 static inline int mark_dirty(struct mm_struct *m, unsigned int mfn)
djm@1648 136 {
djm@1648 137 int rc;
kaf24@2378 138 shadow_lock(m);
kaf24@2635 139 rc = __mark_dirty(m, mfn);
kaf24@2378 140 shadow_unlock(m);
djm@1648 141 return rc;
djm@1648 142 }
djm@1648 143
djm@1648 144
djm@1648 145 /************************************************************************/
djm@1648 146
kaf24@1749 147 static inline void l1pte_write_fault(
kaf24@1749 148 struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
djm@1648 149 {
djm@1648 150 unsigned long gpte = *gpte_p;
djm@1648 151 unsigned long spte = *spte_p;
djm@1648 152
kaf24@2635 153 ASSERT(gpte & _PAGE_RW);
kaf24@2635 154
kaf24@2635 155 gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
kaf24@2635 156
kaf24@1749 157 switch ( m->shadow_mode )
djm@1648 158 {
djm@1648 159 case SHM_test:
kaf24@2635 160 spte = gpte | _PAGE_RW;
djm@1648 161 break;
djm@1648 162
djm@1648 163 case SHM_logdirty:
kaf24@2635 164 spte = gpte | _PAGE_RW;
kaf24@2635 165 __mark_dirty(m, gpte >> PAGE_SHIFT);
djm@1648 166 break;
djm@1648 167 }
djm@1648 168
djm@1648 169 *gpte_p = gpte;
djm@1648 170 *spte_p = spte;
djm@1648 171 }
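/*
 * Editorial worked example: for a guest PTE of _PAGE_PRESENT|_PAGE_RW,
 * both modes produce spte = gpte|_PAGE_DIRTY|_PAGE_ACCESSED with _PAGE_RW
 * retained, i.e. the shadow becomes writable immediately; SHM_logdirty
 * additionally records the write via __mark_dirty() above.
 */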
djm@1648 172
kaf24@1749 173 static inline void l1pte_read_fault(
kaf24@1749 174 struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
djm@1648 175 {
djm@1648 176 unsigned long gpte = *gpte_p;
djm@1648 177 unsigned long spte = *spte_p;
djm@1648 178
kaf24@2635 179 gpte |= _PAGE_ACCESSED;
kaf24@2635 180
kaf24@1749 181 switch ( m->shadow_mode )
djm@1648 182 {
djm@1648 183 case SHM_test:
kaf24@2635 184 spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
djm@1648 185 break;
djm@1648 186
djm@1648 187 case SHM_logdirty:
kaf24@2635 188 spte = gpte & ~_PAGE_RW;
djm@1648 189 break;
djm@1648 190 }
djm@1648 191
djm@1648 192 *gpte_p = gpte;
djm@1648 193 *spte_p = spte;
djm@1648 194 }
djm@1648 195
kaf24@2635 196 static inline void l1pte_propagate_from_guest(
kaf24@1749 197 struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
djm@1648 198 {
djm@1648 199 unsigned long gpte = *gpte_p;
djm@1648 200 unsigned long spte = *spte_p;
djm@1648 201
kaf24@1749 202 switch ( m->shadow_mode )
djm@1648 203 {
djm@1648 204 case SHM_test:
djm@1648 205 spte = 0;
djm@1648 206 if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
djm@1648 207 (_PAGE_PRESENT|_PAGE_ACCESSED) )
kaf24@2635 208 spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
djm@1648 209 break;
djm@1648 210
djm@1648 211 case SHM_logdirty:
djm@1648 212 spte = 0;
djm@1648 213 if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
djm@1648 214 (_PAGE_PRESENT|_PAGE_ACCESSED) )
kaf24@2635 215 spte = gpte & ~_PAGE_RW;
djm@1648 216 break;
djm@1648 217 }
djm@1648 218
djm@1648 219 *gpte_p = gpte;
djm@1648 220 *spte_p = spte;
djm@1648 221 }
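/*
 * Editorial summary of the propagation rules above: a shadow PTE is
 * created only once the guest PTE is both PRESENT and ACCESSED. In
 * SHM_test mode it becomes writable only once the guest PTE is also
 * DIRTY; in SHM_logdirty mode it always starts read-only, so the first
 * write takes a fault that can be logged (see l1pte_write_fault()).
 */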
djm@1648 222
kaf24@1749 223 static inline void l2pde_general(
kaf24@2635 224 struct mm_struct *m,
kaf24@1749 225 unsigned long *gpde_p,
kaf24@1749 226 unsigned long *spde_p,
kaf24@1749 227 unsigned long sl1pfn)
djm@1648 228 {
djm@1648 229 unsigned long gpde = *gpde_p;
djm@1648 230 unsigned long spde = *spde_p;
djm@1648 231
djm@1648 232 spde = 0;
djm@1648 233
kaf24@2635 234 if ( sl1pfn != 0 )
djm@1648 235 {
kaf24@2635 236 spde = (gpde & ~PAGE_MASK) | (sl1pfn << PAGE_SHIFT) |
djm@1648 237 _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
kaf24@2635 238 gpde |= _PAGE_ACCESSED | _PAGE_DIRTY;
djm@1648 239
kaf24@2635 240 /* Detect linear p.t. mappings and write-protect them. */
kaf24@2635 241 if ( (frame_table[sl1pfn].u.inuse.type_info & PGT_type_mask) ==
kaf24@2635 242 PGT_l2_page_table )
djm@1648 243 spde = gpde & ~_PAGE_RW;
djm@1648 244 }
djm@1648 245
djm@1648 246 *gpde_p = gpde;
djm@1648 247 *spde_p = spde;
djm@1648 248 }
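/*
 * Editorial note: the shadow PDE can safely be forced RW|ACCESSED|DIRTY
 * because it points at a shadow L1 maintained by Xen itself. The
 * write-protect case catches a guest L2 mapped into its own address
 * space (a linear page-table mapping), which must never be writable
 * through the shadow.
 */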
djm@1648 249
djm@1648 250 /*********************************************************************/
djm@1648 251
djm@1648 252 #if SHADOW_HASH_DEBUG
djm@1648 253 static void shadow_audit(struct mm_struct *m, int print)
djm@1648 254 {
kaf24@2635 255 int live = 0, free = 0, j = 0, abs;
djm@1648 256 struct shadow_status *a;
djm@1648 257
kaf24@2635 258 for ( j = 0; j < shadow_ht_buckets; j++ )
djm@1648 259 {
djm@1648 260 a = &m->shadow_ht[j];
kaf24@2635 261 if ( a->pfn ) { live++; ASSERT(a->spfn_and_flags & PSH_pfn_mask); }
kaf24@2635 262 ASSERT(a->pfn < 0x00100000UL);
kaf24@2635 263 a = a->next;
kaf24@2635 264 while ( a && (live < 9999) )
djm@1648 265 {
djm@1648 266 live++;
kaf24@2635 267 if ( (a->pfn == 0) || (a->spfn_and_flags == 0) )
djm@1648 268 {
djm@1648 269 printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n",
djm@1648 270 live, a->pfn, a->spfn_and_flags, a->next);
djm@1648 271 BUG();
djm@1648 272 }
kaf24@2635 273 ASSERT(a->pfn < 0x00100000UL);
kaf24@2635 274 ASSERT(a->spfn_and_flags & PSH_pfn_mask);
kaf24@2635 275 a = a->next;
djm@1648 276 }
kaf24@2635 277 ASSERT(live < 9999);
djm@1648 278 }
djm@1648 279
kaf24@2635 280 for ( a = m->shadow_ht_free; a != NULL; a = a->next )
kaf24@2635 281 free++;
djm@1648 282
kaf24@2635 283 if ( print )
kaf24@2635 284 printk("Xlive=%d free=%d\n", live, free);
djm@1648 285
kaf24@2635 286 abs = (perfc_value(shadow_l1_pages) + perfc_value(shadow_l2_pages)) - live;
kaf24@2635 287 if ( (abs < -1) || (abs > 1) )
djm@1648 288 {
djm@1648 289 printk("live=%d free=%d l1=%d l2=%d\n",live,free,
djm@1648 290 perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
djm@1648 291 BUG();
djm@1648 292 }
djm@1648 293 }
djm@1648 294 #else
kaf24@2635 295 #define shadow_audit(p, print) ((void)0)
djm@1648 296 #endif
djm@1648 297
djm@1648 298
djm@1648 299
kaf24@2635 300 static inline struct shadow_status *hash_bucket(
kaf24@2635 301 struct mm_struct *m, unsigned int gpfn)
djm@1648 302 {
kaf24@2635 303 return &m->shadow_ht[gpfn % shadow_ht_buckets];
djm@1648 304 }
djm@1648 305
djm@1648 306
kaf24@2635 307 static inline unsigned long __shadow_status(
kaf24@2635 308 struct mm_struct *m, unsigned int gpfn)
djm@1648 309 {
kaf24@2635 310 struct shadow_status *p, *x, *head;
djm@1648 311
kaf24@2635 312 x = head = hash_bucket(m, gpfn);
kaf24@2635 313 p = NULL;
djm@1648 314
kaf24@2635 315 SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
kaf24@2635 316 shadow_audit(m, 0);
djm@1648 317
djm@1648 318 do
djm@1648 319 {
kaf24@2635 320 ASSERT(x->pfn || ((x == head) && (x->next == NULL)));
djm@1648 321
kaf24@2635 322 if ( x->pfn == gpfn )
kaf24@2635 323 {
kaf24@2635 324 /* Pull-to-front if 'x' isn't already the head item. */
kaf24@2635 325 if ( unlikely(x != head) )
kaf24@2635 326 {
kaf24@2635 327 /* Delete 'x' from list and reinsert immediately after head. */
kaf24@2635 328 p->next = x->next;
kaf24@2635 329 x->next = head->next;
kaf24@2635 330 head->next = x;
djm@1648 331
kaf24@2635 332 /* Swap 'x' contents with head contents: the head entry is embedded in the bucket array and cannot itself be unlinked. */
kaf24@2635 333 SWAP(head->pfn, x->pfn);
kaf24@2635 334 SWAP(head->spfn_and_flags, x->spfn_and_flags);
djm@1648 335 }
kaf24@2635 336
kaf24@2635 337 return head->spfn_and_flags;
djm@1648 338 }
kaf24@2635 339
kaf24@2635 340 p = x;
kaf24@2635 341 x = x->next;
djm@1648 342 }
kaf24@2635 343 while ( x != NULL );
djm@1648 344
djm@1648 345 return 0;
djm@1648 346 }
djm@1648 347
kaf24@2635 348 /*
kaf24@2635 349 * N.B. We can make this locking more fine grained (e.g., per shadow page) if
kaf24@2635 350 * it ever becomes a problem, but since we need a spin lock on the hash table
kaf24@2635 351 * anyway it's probably not worth being too clever.
kaf24@2635 352 */
kaf24@2635 353 static inline unsigned long get_shadow_status(
kaf24@2635 354 struct mm_struct *m, unsigned int gpfn)
djm@1648 355 {
djm@1648 356 unsigned long res;
djm@1648 357
kaf24@2635 358 ASSERT(m->shadow_mode);
djm@1648 359
kaf24@2635 360 /*
kaf24@2635 361 * If we get here we know that some sort of update has happened to the
kaf24@2635 362 * underlying page table page: either a PTE has been updated, or the page
kaf24@2635 363 * has changed type. If we're in log dirty mode, we should set the
kaf24@2635 364 * appropriate bit in the dirty bitmap.
kaf24@2635 365 * N.B. The VA update path doesn't use this and is handled independently.
kaf24@2635 366 */
kaf24@2635 367
kaf24@2378 368 shadow_lock(m);
djm@1648 369
kaf24@2635 370 if ( m->shadow_mode == SHM_logdirty )
djm@1648 371 __mark_dirty(m, gpfn);
djm@1648 372
kaf24@2635 373 if ( !(res = __shadow_status(m, gpfn)) )
kaf24@2378 374 shadow_unlock(m);
kaf24@2635 375
djm@1648 376 return res;
djm@1648 377 }
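/*
 * Editorial sketch (not in the original source): the shadow lock is held
 * on return from get_shadow_status() iff the result is nonzero, so the
 * caller pairs it with put_shadow_status(). 'use_shadow_frame' is a
 * hypothetical helper.
 */
#if 0
static void example_lookup(struct mm_struct *m, unsigned int gpfn)
{
    unsigned long sstat = get_shadow_status(m, gpfn);
    if ( sstat != 0 )
    {
        use_shadow_frame(sstat & PSH_pfn_mask);  /* shadow_lock held here */
        put_shadow_status(m);                    /* drops shadow_lock     */
    }
    /* If sstat == 0, get_shadow_status() already dropped the lock. */
}
#endif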
djm@1648 378
djm@1648 379
kaf24@2635 380 static inline void put_shadow_status(
kaf24@2635 381 struct mm_struct *m)
djm@1648 382 {
kaf24@2378 383 shadow_unlock(m);
djm@1648 384 }
djm@1648 385
djm@1648 386
kaf24@2635 387 static inline void delete_shadow_status(
kaf24@2635 388 struct mm_struct *m, unsigned int gpfn)
djm@1648 389 {
kaf24@2635 390 struct shadow_status *p, *x, *n, *head;
djm@1648 391
djm@1648 392 ASSERT(spin_is_locked(&m->shadow_lock));
kaf24@2635 393 ASSERT(gpfn != 0);
djm@1648 394
kaf24@2635 395 head = hash_bucket(m, gpfn);
djm@1648 396
kaf24@2635 397 SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, b);
kaf24@2635 398 shadow_audit(m, 0);
djm@1648 399
kaf24@2635 400 /* Match on head item? */
kaf24@2635 401 if ( head->pfn == gpfn )
djm@1648 402 {
kaf24@2635 403 if ( (n = head->next) != NULL )
djm@1648 404 {
kaf24@2635 405 /* Overwrite head with contents of following node. */
kaf24@2635 406 head->pfn = n->pfn;
kaf24@2635 407 head->spfn_and_flags = n->spfn_and_flags;
djm@1648 408
kaf24@2635 409 /* Delete following node. */
kaf24@2635 410 head->next = n->next;
kaf24@2635 411
kaf24@2635 412 /* Add deleted node to the free list. */
kaf24@2635 413 n->pfn = 0;
kaf24@2635 414 n->spfn_and_flags = 0;
kaf24@2635 415 n->next = m->shadow_ht_free;
kaf24@2635 416 m->shadow_ht_free = n;
djm@1648 417 }
djm@1648 418 else
djm@1648 419 {
kaf24@2635 420 /* This bucket is now empty. Initialise the head node. */
kaf24@2635 421 head->pfn = 0;
kaf24@2635 422 head->spfn_and_flags = 0;
djm@1648 423 }
djm@1648 424
kaf24@2635 425 goto found;
djm@1648 426 }
djm@1648 427
kaf24@2635 428 p = head;
kaf24@2635 429 x = head->next;
djm@1648 430
djm@1648 431 do
djm@1648 432 {
kaf24@2635 433 if ( x->pfn == gpfn )
djm@1648 434 {
kaf24@2635 435 /* Delete matching node. */
kaf24@2635 436 p->next = x->next;
djm@1648 437
kaf24@2635 438 /* Add deleted node to the free list. */
kaf24@2635 439 x->pfn = 0;
kaf24@2635 440 x->spfn_and_flags = 0;
kaf24@2635 441 x->next = m->shadow_ht_free;
kaf24@2635 442 m->shadow_ht_free = x;
djm@1648 443
kaf24@2635 444 goto found;
djm@1648 445 }
djm@1648 446
kaf24@2635 447 p = x;
kaf24@2635 448 x = x->next;
djm@1648 449 }
kaf24@2635 450 while ( x != NULL );
djm@1648 451
kaf24@2635 452 /* If we got here, it wasn't in the list! */
djm@1648 453 BUG();
kaf24@2635 454
kaf24@2635 455 found:
kaf24@2635 456 shadow_audit(m, 0);
djm@1648 457 }
djm@1648 458
djm@1648 459
kaf24@2635 460 static inline void set_shadow_status(
kaf24@2635 461 struct mm_struct *m, unsigned int gpfn, unsigned long s)
djm@1648 462 {
kaf24@2635 463 struct shadow_status *x, *head, *extra;
djm@1648 464 int i;
djm@1648 465
djm@1648 466 ASSERT(spin_is_locked(&m->shadow_lock));
kaf24@2635 467 ASSERT(gpfn != 0);
kaf24@2635 468 ASSERT(s & PSH_shadowed);
djm@1648 469
kaf24@2635 470 x = head = hash_bucket(m, gpfn);
djm@1648 471
kaf24@2635 472 SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, b, b->next);
kaf24@2635 473 shadow_audit(m, 0);
djm@1648 474
kaf24@2635 475 /*
kaf24@2635 476 * STEP 1. If page is already in the table, update it in place.
kaf24@2635 477 */
djm@1648 478
djm@1648 479 do
djm@1648 480 {
kaf24@2635 481 if ( x->pfn == gpfn )
djm@1648 482 {
kaf24@2635 483 x->spfn_and_flags = s;
kaf24@2635 484 goto done;
djm@1648 485 }
djm@1648 486
kaf24@2635 487 x = x->next;
djm@1648 488 }
kaf24@2635 489 while ( x != NULL );
djm@1648 490
kaf24@2635 491 /*
kaf24@2635 492 * STEP 2. The page must be inserted into the table.
kaf24@2635 493 */
djm@1648 494
kaf24@2635 495 /* If the bucket is empty then insert the new page as the head item. */
kaf24@2635 496 if ( head->pfn == 0 )
djm@1648 497 {
kaf24@2635 498 head->pfn = gpfn;
kaf24@2635 499 head->spfn_and_flags = s;
kaf24@2635 500 ASSERT(head->next == NULL);
kaf24@2635 501 goto done;
djm@1648 502 }
djm@1648 503
kaf24@2635 504 /* We need to allocate a new node. Ensure the quicklist is non-empty. */
kaf24@2635 505 if ( unlikely(m->shadow_ht_free == NULL) )
djm@1648 506 {
kaf24@2635 507 SH_LOG("Allocate more shadow hashtable blocks.");
kaf24@2635 508
kaf24@2635 509 extra = xmalloc(
kaf24@2635 510 sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));
djm@1648 511
kaf24@2635 512 /* XXX Should be more graceful here. */
kaf24@2635 513 if ( extra == NULL )
kaf24@2635 514 BUG();
djm@1648 515
kaf24@2635 516 memset(extra, 0, sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));
djm@1648 517
kaf24@2635 518 /* Record the allocation block so it can be correctly freed later: its link pointer is stored just past the last array entry. */
djm@1648 519 m->shadow_extras_count++;
kaf24@2635 520 *((struct shadow_status **)&extra[shadow_ht_extra_size]) =
kaf24@2635 521 m->shadow_ht_extras;
kaf24@2635 522 m->shadow_ht_extras = &extra[0];
djm@1648 523
kaf24@2635 524 /* Thread a free chain through the newly-allocated nodes. */
kaf24@2635 525 for ( i = 0; i < (shadow_ht_extra_size - 1); i++ )
kaf24@2635 526 extra[i].next = &extra[i+1];
kaf24@2635 527 extra[i].next = NULL;
djm@1648 528
kaf24@2635 529 /* Add the new nodes to the free list. */
kaf24@2635 530 m->shadow_ht_free = &extra[0];
djm@1648 531 }
djm@1648 532
kaf24@2635 533 /* Allocate a new node from the quicklist. */
kaf24@2635 534 x = m->shadow_ht_free;
kaf24@2635 535 m->shadow_ht_free = x->next;
djm@1648 536
kaf24@2635 537 /* Initialise the new node and insert directly after the head item. */
kaf24@2635 538 x->pfn = gpfn;
kaf24@2635 539 x->spfn_and_flags = s;
kaf24@2635 540 x->next = head->next;
kaf24@2635 541 head->next = x;
djm@1648 542
kaf24@2635 543 done:
kaf24@2635 544 shadow_audit(m, 0);
djm@1648 545 }
djm@1648 546
kaf24@2635 547 static inline void __shadow_mk_pagetable(struct mm_struct *mm)
djm@1648 548 {
kaf24@2635 549 unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
kaf24@2635 550 unsigned long spfn = __shadow_status(mm, gpfn);
djm@1648 551
kaf24@2635 552 if ( unlikely(spfn == 0) )
kaf24@2635 553 spfn = shadow_l2_table(mm, gpfn);
kaf24@2635 554
kaf24@2635 555 mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
djm@1648 556 }
djm@1648 557
kaf24@2635 558 static inline void shadow_mk_pagetable(struct mm_struct *mm)
djm@1648 559 {
djm@1648 560 SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
djm@1648 561 pagetable_val(mm->pagetable), mm->shadow_mode );
djm@1648 562
djm@1648 563 if ( unlikely(mm->shadow_mode) )
djm@1648 564 {
kaf24@2378 565 shadow_lock(mm);
kaf24@2378 566 __shadow_mk_pagetable(mm);
kaf24@2378 567 shadow_unlock(mm);
djm@1648 568 }
djm@1648 569
djm@1648 570 SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d ) sh=%08lx",
djm@1648 571 pagetable_val(mm->pagetable), mm->shadow_mode,
djm@1648 572 pagetable_val(mm->shadow_table) );
djm@1648 573 }
djm@1648 574
djm@1648 575 #if SHADOW_DEBUG
mafetter@3813 576 extern int _check_pagetable(struct mm_struct *m, pagetable_t pt, char *s);
mafetter@3813 577 #define check_pagetable(m, pt, s) _check_pagetable(m, pt, s)
djm@1648 578 #else
djm@1648 579 #define check_pagetable(m, pt, s) ((void)0)
djm@1648 580 #endif
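/*
 * Editorial sketch (not in the original source): because check_pagetable
 * is a macro in both configurations, call sites need no #ifdefs; with
 * SHADOW_DEBUG == 0 the call compiles away entirely:
 */
#if 0
check_pagetable(m, m->pagetable, "fault");  /* no-op unless SHADOW_DEBUG */
#endif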
djm@1648 581
djm@1648 582 #endif /* XEN_SHADOW_H */