ia64/xen-unstable

changeset 2667:77b6b9acf383

bitkeeper revision 1.1159.1.245 (41768fbdciA3gqG1QTkfP7ReGFEgPg)

Copy balloon driver from 2.4.
author mwilli2@equilibrium.research
date Wed Oct 20 16:18:05 2004 +0000 (2004-10-20)
parents d10a9677a639
children 0c8e3e76fd4f
files .rootkeys linux-2.6.8.1-xen-sparse/drivers/xen/balloon/Makefile linux-2.6.8.1-xen-sparse/drivers/xen/balloon/balloon.c
line diff
     1.1 --- a/.rootkeys	Wed Oct 20 14:44:56 2004 +0000
     1.2 +++ b/.rootkeys	Wed Oct 20 16:18:05 2004 +0000
     1.3 @@ -183,6 +183,8 @@ 41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6
     1.4  4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.8.1-xen-sparse/drivers/char/mem.c
     1.5  4111308bZAIzwf_Kzu6x1TZYZ3E0_Q linux-2.6.8.1-xen-sparse/drivers/char/tty_io.c
     1.6  40f56239Dp_vMTgz8TEbvo1hjHGc3w linux-2.6.8.1-xen-sparse/drivers/xen/Makefile
     1.7 +41768fbcncpBQf8s2l2-CwoSNIZ9uA linux-2.6.8.1-xen-sparse/drivers/xen/balloon/Makefile
     1.8 +3e6377f8i5e9eGz7Pw6fQuhuTQ7DQg linux-2.6.8.1-xen-sparse/drivers/xen/balloon/balloon.c
     1.9  410d0893otFGghmv4dUXDUBBdY5aIA linux-2.6.8.1-xen-sparse/drivers/xen/blkback/Makefile
    1.10  4087cf0d1XgMkooTZAiJS6NrcpLQNQ linux-2.6.8.1-xen-sparse/drivers/xen/blkback/blkback.c
    1.11  4087cf0dZadZ8r6CEt4fNN350Yle3A linux-2.6.8.1-xen-sparse/drivers/xen/blkback/common.h
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/linux-2.6.8.1-xen-sparse/drivers/xen/balloon/Makefile	Wed Oct 20 16:18:05 2004 +0000
     2.3 @@ -0,0 +1,2 @@
     2.4 +
     2.5 +obj-y += balloon.o
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/linux-2.6.8.1-xen-sparse/drivers/xen/balloon/balloon.c	Wed Oct 20 16:18:05 2004 +0000
     3.3 @@ -0,0 +1,513 @@
     3.4 +/******************************************************************************
     3.5 + * balloon.c
     3.6 + *
     3.7 + * Xen balloon driver - enables returning/claiming memory to/from Xen.
     3.8 + *
     3.9 + * Copyright (c) 2003, B Dragovic
    3.10 + */
    3.11 +
    3.12 +#include <linux/config.h>
    3.13 +#include <linux/module.h>
    3.14 +#include <linux/kernel.h>
    3.15 +#include <linux/sched.h>
    3.16 +#include <linux/errno.h>
    3.17 +#include <asm/xen_proc.h>
    3.18 +
    3.19 +#include <linux/mm.h>
    3.20 +#include <linux/mman.h>
    3.21 +#include <linux/smp_lock.h>
    3.22 +#include <linux/pagemap.h>
    3.23 +#include <linux/bootmem.h>
    3.24 +#include <linux/highmem.h>
    3.25 +#include <linux/vmalloc.h>
    3.26 +
    3.27 +#include <asm/hypervisor.h>
    3.28 +#include <asm/pgalloc.h>
    3.29 +#include <asm/pgtable.h>
    3.30 +#include <asm/uaccess.h>
    3.31 +#include <asm/tlb.h>
    3.32 +
/* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
#define USER_INFLATE_BALLOON  1   /* return mem to hypervisor */
#define USER_DEFLATE_BALLOON  2   /* claim mem from hypervisor */
/* Request descriptor shared with user-space balloon tools. */
typedef struct user_balloon_op {
    unsigned int  op;     /* USER_INFLATE_BALLOON or USER_DEFLATE_BALLOON */
    unsigned long size;   /* request size — presumably pages; not used in this file, confirm against the user tools */
} user_balloon_op_t;
/* END OF USER DEFINE */

/* /proc entry (memory_target) through which the balloon is driven. */
static struct proc_dir_entry *balloon_pde;
/* Number of pages currently given back to Xen; upper bound for deflation. */
unsigned long credit;
/* current_pages: pages this domain holds right now.
 * most_seen_pages: high-water mark of pages ever held; bounds the
 * phys_to_machine_mapping scan in process_returned_pages(). */
static unsigned long current_pages, most_seen_pages;

/*
 * Dead entry written into balloon-owned entries in the PMT.
 * It is deliberately different to INVALID_P2M_ENTRY.
 */
#define DEAD 0xdead1234
    3.51 +
    3.52 +static inline pte_t *get_ptep(unsigned long addr)
    3.53 +{
    3.54 +    pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
    3.55 +    pgd = pgd_offset_k(addr);
    3.56 +
    3.57 +    if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
    3.58 +
    3.59 +    pmd = pmd_offset(pgd, addr);
    3.60 +    if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
    3.61 +
    3.62 +    ptep = pte_offset(pmd, addr);
    3.63 +
    3.64 +    return ptep;
    3.65 +}
    3.66 +
    3.67 +/* Main function for relinquishing memory. */
    3.68 +static unsigned long inflate_balloon(unsigned long num_pages)
    3.69 +{
    3.70 +    unsigned long *parray;
    3.71 +    unsigned long *currp;
    3.72 +    unsigned long curraddr;
    3.73 +    unsigned long ret = 0;
    3.74 +    unsigned long i, j;
    3.75 +
    3.76 +    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
    3.77 +    if ( parray == NULL )
    3.78 +    {
    3.79 +        printk(KERN_ERR "inflate_balloon: Unable to vmalloc parray\n");
    3.80 +        return -EFAULT;
    3.81 +    }
    3.82 +
    3.83 +    currp = parray;
    3.84 +
    3.85 +    for ( i = 0; i < num_pages; i++, currp++ )
    3.86 +    {
    3.87 +        struct page *page = alloc_page(GFP_HIGHUSER);
    3.88 +        unsigned long pfn = page - mem_map;
    3.89 +
    3.90 +        /* If allocation fails then free all reserved pages. */
    3.91 +        if ( page == NULL )
    3.92 +        {
    3.93 +            printk(KERN_ERR "Unable to inflate balloon by %ld, only"
    3.94 +                   " %ld pages free.", num_pages, i);
    3.95 +            currp = parray;
    3.96 +            for ( j = 0; j < i; j++, currp++ )
    3.97 +                __free_page((struct page *) (mem_map + *currp));
    3.98 +            ret = -EFAULT;
    3.99 +            goto cleanup;
   3.100 +        }
   3.101 +
   3.102 +        *currp = pfn;
   3.103 +    }
   3.104 +
   3.105 +
   3.106 +    for ( i = 0, currp = parray; i < num_pages; i++, currp++ )
   3.107 +    {
   3.108 +        unsigned long mfn = phys_to_machine_mapping[*currp];
   3.109 +        curraddr = (unsigned long)page_address(mem_map + *currp);
   3.110 +        /* Blow away page contents for security, and also p.t. ref if any. */
   3.111 +        if ( curraddr != 0 )
   3.112 +        {
   3.113 +            scrub_pages(curraddr, 1);
   3.114 +            queue_l1_entry_update(get_ptep(curraddr), 0);
   3.115 +        }
   3.116 +#ifdef CONFIG_XEN_SCRUB_PAGES
   3.117 +        else
   3.118 +        {
   3.119 +            void *p = kmap(&mem_map[*currp]);
   3.120 +            scrub_pages(p, 1);
   3.121 +            kunmap(&mem_map[*currp]);
   3.122 +        }
   3.123 +#endif
   3.124 +        phys_to_machine_mapping[*currp] = DEAD;
   3.125 +        *currp = mfn;
   3.126 +    }
   3.127 +
   3.128 +    /* Flush updates through and flush the TLB. */
   3.129 +    xen_tlb_flush();
   3.130 +
   3.131 +    ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
   3.132 +                                parray, num_pages, 0);
   3.133 +    if ( unlikely(ret != num_pages) )
   3.134 +    {
   3.135 +        printk(KERN_ERR "Unable to inflate balloon, error %lx\n", ret);
   3.136 +        goto cleanup;
   3.137 +    }
   3.138 +
   3.139 +    credit += num_pages;
   3.140 +    ret = num_pages;
   3.141 +
   3.142 + cleanup:
   3.143 +    vfree(parray);
   3.144 +
   3.145 +    return ret;
   3.146 +}
   3.147 +
   3.148 +/*
   3.149 + * Install new mem pages obtained by deflate_balloon. function walks 
   3.150 + * phys->machine mapping table looking for DEAD entries and populates
   3.151 + * them.
   3.152 + */
   3.153 +static unsigned long process_returned_pages(unsigned long * parray, 
   3.154 +                                       unsigned long num)
   3.155 +{
   3.156 +    /* currently, this function is rather simplistic as 
   3.157 +     * it is assumed that domain reclaims only number of 
   3.158 +     * pages previously released. this is to change soon
   3.159 +     * and the code to extend page tables etc. will be 
   3.160 +     * incorporated here.
   3.161 +     */
   3.162 +     
   3.163 +    unsigned long tot_pages = most_seen_pages;   
   3.164 +    unsigned long * curr = parray;
   3.165 +    unsigned long num_installed;
   3.166 +    unsigned long i;
   3.167 +
   3.168 +    num_installed = 0;
   3.169 +    for ( i = 0; (i < tot_pages) && (num_installed < num); i++ )
   3.170 +    {
   3.171 +        if ( phys_to_machine_mapping[i] == DEAD )
   3.172 +        {
   3.173 +            phys_to_machine_mapping[i] = *curr;
   3.174 +            queue_machphys_update(*curr, i);
   3.175 +            if (i<max_low_pfn)
   3.176 +              queue_l1_entry_update(
   3.177 +                get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
   3.178 +                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
   3.179 +
   3.180 +            __free_page(mem_map + i);
   3.181 +
   3.182 +            curr++;
   3.183 +            num_installed++;
   3.184 +        }
   3.185 +    }
   3.186 +
   3.187 +    return num_installed;
   3.188 +}
   3.189 +
   3.190 +unsigned long deflate_balloon(unsigned long num_pages)
   3.191 +{
   3.192 +    unsigned long ret;
   3.193 +    unsigned long * parray;
   3.194 +
   3.195 +    if ( num_pages > credit )
   3.196 +    {
   3.197 +        printk(KERN_ERR "deflate_balloon: %lu pages > %lu credit.\n",
   3.198 +               num_pages, credit);
   3.199 +        return -EAGAIN;
   3.200 +    }
   3.201 +
   3.202 +    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
   3.203 +    if ( parray == NULL )
   3.204 +    {
   3.205 +        printk(KERN_ERR "deflate_balloon: Unable to vmalloc parray\n");
   3.206 +        return 0;
   3.207 +    }
   3.208 +
   3.209 +    ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, 
   3.210 +                                parray, num_pages, 0);
   3.211 +    if ( unlikely(ret != num_pages) )
   3.212 +    {
   3.213 +        printk(KERN_ERR "deflate_balloon: xen increase_reservation err %lx\n",
   3.214 +               ret);
   3.215 +        goto cleanup;
   3.216 +    }
   3.217 +
   3.218 +    if ( (ret = process_returned_pages(parray, num_pages)) < num_pages )
   3.219 +    {
   3.220 +        printk(KERN_WARNING
   3.221 +               "deflate_balloon: restored only %lx of %lx pages.\n",
   3.222 +           ret, num_pages);
   3.223 +        goto cleanup;
   3.224 +    }
   3.225 +
   3.226 +    ret = num_pages;
   3.227 +    credit -= num_pages;
   3.228 +
   3.229 + cleanup:
   3.230 +    vfree(parray);
   3.231 +
   3.232 +    return ret;
   3.233 +}
   3.234 +
/* Shift converting a page count to MB (assumes 4KB pages: 256 pages/MB). */
#define PAGE_TO_MB_SHIFT 8

/*
 * pagetable_extend() mimics pagetable_init() from arch/xen/mm/init.c 
 * The loops do go through all of low memory (ZONE_NORMAL).  The
 * old pages have _PAGE_PRESENT set and so get skipped.
 * If low memory is not full, the new pages are used to fill it, going
 * from cur_low_pfn to low_pfn.   high memory is not direct mapped so
 * no extension is needed for new high memory.
 */

static void pagetable_extend (int cur_low_pfn, int newpages)
{
    unsigned long vaddr, end;
    pgd_t *kpgd, *pgd, *pgd_base;
    int i, j, k;
    pmd_t *kpmd, *pmd;
    pte_t *kpte, *pte, *pte_base;
    /* Extend only up to max_low_pfn; highmem needs no direct mapping. */
    int low_pfn = min(cur_low_pfn+newpages,(int)max_low_pfn);

    /*
     * This can be zero as well - no problem, in that case we exit
     * the loops anyway due to the PTRS_PER_* conditions.
     */
    end = (unsigned long)__va(low_pfn*PAGE_SIZE);

    pgd_base = init_mm.pgd;
    /* Start at the first kernel-space pgd slot (PAGE_OFFSET upward). */
    i = __pgd_offset(PAGE_OFFSET);
    pgd = pgd_base + i;

    for (; i < PTRS_PER_PGD; pgd++, i++) {
        vaddr = i*PGDIR_SIZE;
        if (end && (vaddr >= end))
            break;
        /* Two-level paging here: the pgd entry doubles as the pmd. */
        pmd = (pmd_t *)pgd;
        for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
            vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
            if (end && (vaddr >= end))
                break;

            /* Filled in for us already? */
            if ( pmd_val(*pmd) & _PAGE_PRESENT )
                continue;

            /* NOTE(review): __get_free_page() is not checked for NULL;
             * an allocation failure here would oops below — confirm and
             * add handling. */
            pte_base = pte = (pte_t *) __get_free_page(GFP_KERNEL);

            for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                if (end && (vaddr >= end))
                    break;
                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
            }
            /* Xen requires page tables to be mapped read-only: strip
             * _PAGE_RW from the mapping of the new pte page before
             * hooking it into the pmd. */
            kpgd = pgd_offset_k((unsigned long)pte_base);
            kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
            kpte = pte_offset(kpmd, (unsigned long)pte_base);
            queue_l1_entry_update(kpte,
                                  (*(unsigned long *)kpte)&~_PAGE_RW);
            set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
            XEN_flush_page_update_queue();
        }
    }
}
   3.297 +
   3.298 +/*
   3.299 + * claim_new_pages() asks xen to increase this domain's memory  reservation
   3.300 + * and return a list of the new pages of memory.  This new pages are
   3.301 + * added to the free list of the memory manager.
   3.302 + *
   3.303 + * Available RAM does not normally change while Linux runs.  To make this work,
   3.304 + * the linux mem= boottime command line param must say how big memory could
   3.305 + * possibly grow.  Then setup_arch() in arch/xen/kernel/setup.c
   3.306 + * sets max_pfn, max_low_pfn and the zones according to
   3.307 + * this max memory size.   The page tables themselves can only be
   3.308 + * extended after xen has assigned new pages to this domain.
   3.309 + */
   3.310 +
   3.311 +static unsigned long
   3.312 +claim_new_pages(unsigned long num_pages)
   3.313 +{
   3.314 +    unsigned long new_page_cnt, pfn;
   3.315 +    unsigned long * parray, *curr;
   3.316 +
   3.317 +    if (most_seen_pages+num_pages> max_pfn)
   3.318 +        num_pages = max_pfn-most_seen_pages;
   3.319 +    if (num_pages==0) return 0;
   3.320 +
   3.321 +    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
   3.322 +    if ( parray == NULL )
   3.323 +    {
   3.324 +        printk(KERN_ERR "claim_new_pages: Unable to vmalloc parray\n");
   3.325 +        return 0;
   3.326 +    }
   3.327 +
   3.328 +    new_page_cnt = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, 
   3.329 +                                parray, num_pages, 0);
   3.330 +    if ( new_page_cnt != num_pages )
   3.331 +    {
   3.332 +        printk(KERN_WARNING
   3.333 +            "claim_new_pages: xen granted only %lu of %lu requested pages\n",
   3.334 +            new_page_cnt, num_pages);
   3.335 +
   3.336 +        /* 
   3.337 +         * Avoid xen lockup when user forgot to setdomainmaxmem. Xen
   3.338 +         * usually can dribble out a few pages and then hangs.
   3.339 +         */
   3.340 +        if ( new_page_cnt < 1000 )
   3.341 +        {
   3.342 +            printk(KERN_WARNING "Remember to use setdomainmaxmem\n");
   3.343 +            HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
   3.344 +                                parray, new_page_cnt, 0);
   3.345 +            return -EFAULT;
   3.346 +        }
   3.347 +    }
   3.348 +    memcpy(phys_to_machine_mapping+most_seen_pages, parray,
   3.349 +           new_page_cnt * sizeof(unsigned long));
   3.350 +
   3.351 +    pagetable_extend(most_seen_pages,new_page_cnt);
   3.352 +
   3.353 +    for ( pfn = most_seen_pages, curr = parray;
   3.354 +          pfn < most_seen_pages+new_page_cnt;
   3.355 +          pfn++, curr++ )
   3.356 +    {
   3.357 +        struct page *page = mem_map + pfn;
   3.358 +
   3.359 +#ifndef CONFIG_HIGHMEM
   3.360 +        if ( pfn>=max_low_pfn )
   3.361 +        {
   3.362 +            printk(KERN_WARNING "Warning only %ldMB will be used.\n",
   3.363 +               pfn>>PAGE_TO_MB_SHIFT);
   3.364 +            printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
   3.365 +            break;
   3.366 +        }
   3.367 +#endif
   3.368 +        queue_machphys_update(*curr, pfn);
   3.369 +        if ( pfn < max_low_pfn )
   3.370 +            queue_l1_entry_update(
   3.371 +                get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
   3.372 +                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
   3.373 +        
   3.374 +        XEN_flush_page_update_queue();
   3.375 +        
   3.376 +        /* this next bit mimics arch/xen/mm/init.c:one_highpage_init() */
   3.377 +        ClearPageReserved(page);
   3.378 +        if ( pfn >= max_low_pfn )
   3.379 +            set_bit(PG_highmem, &page->flags);
   3.380 +        set_page_count(page, 1);
   3.381 +        __free_page(page);
   3.382 +    }
   3.383 +
   3.384 +    vfree(parray);
   3.385 +
   3.386 +    return new_page_cnt;
   3.387 +}
   3.388 +
/*
 * /proc write handler: parse a memory-size string (memparse format,
 * e.g. "64M") as the target domain size in bytes, then inflate or
 * deflate the balloon — growing past the historical maximum via
 * claim_new_pages() — to move current_pages toward the target.
 * Returns the number of input characters consumed, or a -ve errno.
 */
static int balloon_write(struct file *file, const char *buffer,
                         u_long count, void *data)
{
    char memstring[64], *endchar;
    int len, i;
    unsigned long target;
    unsigned long long targetbytes;

    /* Only admin can play with the balloon :) */
    if ( !capable(CAP_SYS_ADMIN) )
        return -EPERM;

    if ( count > sizeof(memstring) )
        return -EFBIG;

    /* NOTE(review): assumes strnlen_user() counts the trailing NUL and
     * returns 0 on fault — confirm against this kernel version; if the
     * input has no NUL within 'count', memstring termination below
     * relies on that contract. */
    len = strnlen_user(buffer, count);
    if ( len == 0 ) return -EBADMSG;
    if ( len == 1 ) return 1; /* input starts with a NUL char */
    if ( strncpy_from_user(memstring, buffer, len) < 0 )
        return -EFAULT;

    /* Require at least one leading digit before handing to memparse. */
    endchar = memstring;
    for ( i = 0; i < len; ++i, ++endchar )
        if ( (memstring[i] < '0') || (memstring[i] > '9') )
            break;
    if ( i == 0 )
        return -EBADMSG;

    targetbytes = memparse(memstring,&endchar);
    target = targetbytes >> PAGE_SHIFT;   /* bytes -> pages */

    if ( target < current_pages )
    {
        /* Shrink: give (current_pages - target) pages back to Xen. */
        int change = inflate_balloon(current_pages-target);
        if ( change <= 0 )
            return change;

        current_pages -= change;
        printk(KERN_INFO "Relinquish %dMB to xen. Domain now has %luMB\n",
            change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
    }
    else if ( target > current_pages )
    {
        /* Grow: first reclaim pages we previously released... */
        int change, reclaim = min(target,most_seen_pages) - current_pages;

        if ( reclaim )
        {
            change = deflate_balloon( reclaim);
            if ( change <= 0 )
                return change;
            current_pages += change;
            printk(KERN_INFO "Reclaim %dMB from xen. Domain now has %luMB\n",
                change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
        }

        /* ...then ask Xen for genuinely new pages beyond the old maximum. */
        if ( most_seen_pages < target )
        {
            int growth = claim_new_pages(target-most_seen_pages);
            if ( growth <= 0 )
                return growth;
            most_seen_pages += growth;
            current_pages += growth;
            printk(KERN_INFO "Granted %dMB new mem. Dom now has %luMB\n",
                growth>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
        }
    }


    return len;
}
   3.459 +
   3.460 +
   3.461 +static int balloon_read(char *page, char **start, off_t off,
   3.462 +      int count, int *eof, void *data)
   3.463 +{
   3.464 +    int len;
   3.465 +    len = sprintf(page,"%lu\n",current_pages<<PAGE_SHIFT);
   3.466 +
   3.467 +    if (len <= off+count) *eof = 1;
   3.468 +    *start = page + off;
   3.469 +    len -= off;
   3.470 +    if (len>count) len = count;
   3.471 +    if (len<0) len = 0;
   3.472 +    return len;
   3.473 +}
   3.474 +
   3.475 +static int __init init_module(void)
   3.476 +{
   3.477 +    printk(KERN_ALERT "Starting Xen Balloon driver\n");
   3.478 +
   3.479 +    most_seen_pages = current_pages = min(xen_start_info.nr_pages,max_pfn);
   3.480 +    if ( (balloon_pde = create_xen_proc_entry("memory_target", 0644)) == NULL )
   3.481 +    {
   3.482 +        printk(KERN_ALERT "Unable to create balloon driver proc entry!");
   3.483 +        return -1;
   3.484 +    }
   3.485 +
   3.486 +    balloon_pde->write_proc = balloon_write;
   3.487 +    balloon_pde->read_proc = balloon_read;
   3.488 +
   3.489 +    /* 
   3.490 +     * make a new phys map if mem= says xen can give us memory  to grow
   3.491 +     */
   3.492 +    if ( max_pfn > xen_start_info.nr_pages )
   3.493 +    {
   3.494 +        extern unsigned long *phys_to_machine_mapping;
   3.495 +        unsigned long *newmap;
   3.496 +        newmap = (unsigned long *)vmalloc(max_pfn * sizeof(unsigned long));
   3.497 +        memset(newmap, ~0, max_pfn * sizeof(unsigned long));
   3.498 +        memcpy(newmap, phys_to_machine_mapping,
   3.499 +               xen_start_info.nr_pages * sizeof(unsigned long));
   3.500 +        phys_to_machine_mapping = newmap;
   3.501 +    }
   3.502 +
   3.503 +    return 0;
   3.504 +}
   3.505 +
   3.506 +static void __exit cleanup_module(void)
   3.507 +{
   3.508 +    if ( balloon_pde != NULL )
   3.509 +    {
   3.510 +        remove_xen_proc_entry("balloon");
   3.511 +        balloon_pde = NULL;
   3.512 +    }
   3.513 +}
   3.514 +
   3.515 +module_init(init_module);
   3.516 +module_exit(cleanup_module);