ia64/xen-unstable

changeset 2776:611b907c8330

bitkeeper revision 1.1159.1.307 (4182266cAGOQsr2Dkb1NULqhnlOgQg)

Thread a linked list of ballooned pages through the mem_map array.
Balloon driver should now work under migration and suspend / resume.
author mwilli2@equilibrium.research
date Fri Oct 29 11:15:56 2004 +0000 (2004-10-29)
parents ad13896e776c
children 0f8ac790fa96
files linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c
line diff
     1.1 --- a/linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c	Fri Oct 29 11:13:17 2004 +0000
     1.2 +++ b/linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c	Fri Oct 29 11:15:56 2004 +0000
     1.3 @@ -60,6 +60,8 @@
     1.4  #include <asm/uaccess.h>
     1.5  #include <asm/tlb.h>
     1.6  
     1.7 +#include <linux/list.h>
     1.8 +
     1.9  /* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
    1.10  #define USER_INFLATE_BALLOON  1   /* return mem to hypervisor */
    1.11  #define USER_DEFLATE_BALLOON  2   /* claim mem from hypervisor */
    1.12 @@ -74,11 +76,60 @@ static struct proc_dir_entry *balloon_pd
    1.13  unsigned long credit;
    1.14  static unsigned long current_pages, most_seen_pages;
    1.15  
    1.16 -/*
    1.17 - * Dead entry written into balloon-owned entries in the PMT.
    1.18 - * It is deliberately different to INVALID_P2M_ENTRY.
    1.19 - */
    1.20 -#define DEAD 0xdead1234
    1.21 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    1.22 +
    1.23 +/* Head of the list of ballooned pages */
    1.24 +static struct page *ball_pg_hd = NULL;
    1.25 +
    1.26 +void add_ballooned_page(unsigned long pfn)
    1.27 +{
    1.28 +    struct page *p = mem_map + pfn;
    1.29 +    
    1.30 +    p->private = (unsigned long)ball_pg_hd;
    1.31 +    ball_pg_hd = p;
    1.32 +}
    1.33 +
    1.34 +struct page *rem_ballooned_page(void)
    1.35 +{
    1.36 +    if ( ball_pg_hd != NULL )
    1.37 +    {
    1.38 +        struct page *ret = ball_pg_hd;
    1.39 +        ball_pg_hd = (struct page *)ball_pg_hd->private;
    1.40 +        return ret;
    1.41 +    }
    1.42 +    else
    1.43 +        return NULL;
    1.44 +}   
    1.45 +
    1.46 +#else
    1.47 +/* List of ballooned pages, threaded through the mem_map array. */
    1.48 +LIST_HEAD(ballooned_pages);
    1.49 +
    1.50 +void add_ballooned_page(unsigned long pfn)
    1.51 +{
    1.52 +    struct page *p = mem_map + pfn;
    1.53 +
    1.54 +    list_add(&p->list, &ballooned_pages);
    1.55 +}
    1.56 +
    1.57 +struct page *rem_ballooned_page(void)
    1.58 +{
    1.59 +    if(!list_empty(&ballooned_pages))
    1.60 +    {
    1.61 +        struct list_head *next;
    1.62 +        struct page *ret;
    1.63 +
    1.64 +        next = ballooned_pages.next;
    1.65 +        ret = list_entry(next, struct page, list);
    1.66 +        list_del(next);
    1.67 +
    1.68 +        return ret;
    1.69 +    }
    1.70 +    else
    1.71 +        return NULL;
    1.72 +}
    1.73 +
    1.74 +#endif
    1.75  
    1.76  static inline pte_t *get_ptep(unsigned long addr)
    1.77  {
    1.78 @@ -101,6 +152,7 @@ static inline pte_t *get_ptep(unsigned l
    1.79  
    1.80  /* Main function for relinquishing memory. */
    1.81  static unsigned long inflate_balloon(unsigned long num_pages)
    1.82 +
    1.83  {
    1.84      unsigned long *parray;
    1.85      unsigned long *currp;
    1.86 @@ -130,6 +182,7 @@ static unsigned long inflate_balloon(uns
    1.87              currp = parray;
    1.88              for ( j = 0; j < i; j++, currp++ )
    1.89                  __free_page((struct page *) (mem_map + *currp));
    1.90 +
    1.91              ret = -EFAULT;
    1.92              goto cleanup;
    1.93          }
    1.94 @@ -156,7 +209,10 @@ static unsigned long inflate_balloon(uns
    1.95              kunmap(&mem_map[*currp]);
    1.96          }
    1.97  #endif
    1.98 -        phys_to_machine_mapping[*currp] = DEAD;
    1.99 +
   1.100 +        add_ballooned_page(*currp);
   1.101 +
   1.102 +        phys_to_machine_mapping[*currp] = INVALID_P2M_ENTRY;
   1.103          *currp = mfn;
   1.104      }
   1.105  
   1.106 @@ -195,28 +251,38 @@ static unsigned long process_returned_pa
   1.107       * incorporated here.
   1.108       */
   1.109       
   1.110 -    unsigned long tot_pages = most_seen_pages;   
   1.111      unsigned long * curr = parray;
   1.112      unsigned long num_installed;
   1.113 -    unsigned long i;
   1.114 +
   1.115 +    struct page *page;
   1.116  
   1.117      num_installed = 0;
   1.118 -    for ( i = 0; (i < tot_pages) && (num_installed < num); i++ )
   1.119 +    while ( (page = rem_ballooned_page()) != NULL )
   1.120      {
   1.121 -        if ( phys_to_machine_mapping[i] == DEAD )
   1.122 +        unsigned long pfn;
   1.123 +
   1.124 +        if ( num_installed == num )
   1.125 +            break;
   1.126 +
   1.127 +        pfn = page - mem_map;
   1.128 +
   1.129 +        if(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
   1.130          {
   1.131 -            phys_to_machine_mapping[i] = *curr;
   1.132 -            queue_machphys_update(*curr, i);
   1.133 -            if (i<max_low_pfn)
   1.134 -              queue_l1_entry_update(
   1.135 -                get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
    1.136 +            printk(KERN_ERR "BUG: Tried to unballoon existing page!\n");
   1.137 +            BUG();
   1.138 +        }
   1.139 +
   1.140 +        phys_to_machine_mapping[pfn] = *curr;
   1.141 +        queue_machphys_update(*curr, pfn);
   1.142 +        if (pfn<max_low_pfn)
   1.143 +            queue_l1_entry_update(
   1.144 +                get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
   1.145                  ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
   1.146 -
   1.147 -            __free_page(mem_map + i);
   1.148 +        
   1.149 +        __free_page(mem_map + pfn);
   1.150  
   1.151 -            curr++;
   1.152 -            num_installed++;
   1.153 -        }
   1.154 +        curr++;
   1.155 +        num_installed++;
   1.156      }
   1.157  
   1.158      return num_installed;