ia64/xen-unstable

annotate xen/common/memory.c @ 8414:3d1c7be170a7

Remove direct references to frame_table array. Use
pfn_to_page (or page_to_pfn) instead.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Dec 20 12:46:56 2005 +0100 (2005-12-20)
parents c3cfc4ff3b08
children d966b7a00959
rev   line source
kaf24@6486 1 /******************************************************************************
kaf24@6486 2 * memory.c
kaf24@6486 3 *
kaf24@6486 4 * Code to handle memory-related requests.
kaf24@6486 5 *
kaf24@6486 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6486 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6486 8 */
kaf24@6486 9
kaf24@6486 10 #include <xen/config.h>
kaf24@6486 11 #include <xen/types.h>
kaf24@6486 12 #include <xen/lib.h>
kaf24@6486 13 #include <xen/mm.h>
kaf24@6486 14 #include <xen/perfc.h>
kaf24@6486 15 #include <xen/sched.h>
kaf24@6486 16 #include <xen/event.h>
kaf24@6486 17 #include <xen/shadow.h>
kaf24@6486 18 #include <asm/current.h>
kaf24@6486 19 #include <asm/hardirq.h>
kaf24@6486 20 #include <public/memory.h>
kaf24@6486 21
kaf24@6486 22 static long
kaf24@6486 23 increase_reservation(
kaf24@6486 24 struct domain *d,
kaf24@6486 25 unsigned long *extent_list,
kaf24@6486 26 unsigned int nr_extents,
kaf24@6486 27 unsigned int extent_order,
kaf24@6607 28 unsigned int flags,
kaf24@6607 29 int *preempted)
kaf24@6486 30 {
kaf24@6486 31 struct pfn_info *page;
kaf24@6752 32 unsigned int i;
kaf24@6486 33
kaf24@6701 34 if ( (extent_list != NULL) &&
kaf24@6701 35 !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
kaf24@6486 36 return 0;
kaf24@6486 37
kaf24@6486 38 if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
kaf24@6486 39 {
kaf24@6752 40 DPRINTK("Only I/O-capable domains may allocate multi-page extents.\n");
kaf24@6486 41 return 0;
kaf24@6486 42 }
kaf24@6486 43
kaf24@6486 44 for ( i = 0; i < nr_extents; i++ )
kaf24@6486 45 {
kaf24@6486 46 if ( hypercall_preempt_check() )
kaf24@6607 47 {
kaf24@6607 48 *preempted = 1;
kaf24@6486 49 return i;
kaf24@6607 50 }
kaf24@6486 51
kaf24@6486 52 if ( unlikely((page = alloc_domheap_pages(
kaf24@6486 53 d, extent_order, flags)) == NULL) )
kaf24@6486 54 {
kaf24@6752 55 DPRINTK("Could not allocate order=%d extent: "
kaf24@6752 56 "id=%d flags=%x (%d of %d)\n",
kaf24@6752 57 extent_order, d->domain_id, flags, i, nr_extents);
kaf24@6486 58 return i;
kaf24@6486 59 }
kaf24@6486 60
kaf24@6486 61 /* Inform the domain of the new page's machine address. */
kaf24@6701 62 if ( (extent_list != NULL) &&
kaf24@6701 63 (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
kaf24@6486 64 return i;
kaf24@6486 65 }
kaf24@6486 66
kaf24@6486 67 return nr_extents;
kaf24@6486 68 }
kaf24@6486 69
/*
 * Release @nr_extents memory extents of order @extent_order from
 * domain @d.  @extent_list supplies the machine frame number of the
 * first page of each extent.  @flags is not used by this path (kept
 * for signature symmetry with increase_reservation).  Returns the
 * number of extents fully processed; sets *@preempted and returns
 * early when the hypercall should be restarted as a continuation.
 */
static long
decrease_reservation(
    struct domain *d,
    unsigned long *extent_list,
    unsigned int nr_extents,
    unsigned int extent_order,
    unsigned int flags,
    int *preempted)
{
    struct pfn_info *page;
    unsigned long i, j, mpfn;

    if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
        return 0;

    for ( i = 0; i < nr_extents; i++ )
    {
        /* Bail out so the caller can create a continuation. */
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        /* Machine frame number of the extent's first page. */
        if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
            return i;

        for ( j = 0; j < (1 << extent_order); j++ )
        {
            if ( unlikely((mpfn + j) >= max_page) )
            {
                DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
                        d->domain_id, mpfn + j, max_page);
                return i;
            }

            page = pfn_to_page(mpfn + j);
            /* Page must be owned by @d; take a temporary reference. */
            if ( unlikely(!get_page(page, d)) )
            {
                DPRINTK("Bad page free for domain %u\n", d->domain_id);
                return i;
            }

            /* Drop the type ref held by a pinned pagetable, if any. */
            if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
                put_page_and_type(page);

            /* Drop the allocation reference. */
            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                put_page(page);

            shadow_sync_and_drop_references(d, page);

            /* Release the temporary reference taken above. */
            put_page(page);
        }
    }

    return nr_extents;
}
kaf24@6486 126
kaf24@6486 127 /*
kaf24@6486 128 * To allow safe resume of do_memory_op() after preemption, we need to know
kaf24@6486 129 * at what point in the page list to resume. For this purpose I steal the
kaf24@6486 130 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
kaf24@6486 131 */
kaf24@6486 132 #define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
kaf24@6486 133
kaf24@6486 134 long do_memory_op(int cmd, void *arg)
kaf24@6486 135 {
kaf24@6486 136 struct domain *d;
kaf24@6607 137 int rc, start_extent, op, flags = 0, preempted = 0;
kaf24@6486 138 struct xen_memory_reservation reservation;
kaf24@7959 139 domid_t domid;
kaf24@6486 140
kaf24@6486 141 op = cmd & ((1 << START_EXTENT_SHIFT) - 1);
kaf24@6486 142
kaf24@6486 143 switch ( op )
kaf24@6486 144 {
kaf24@6486 145 case XENMEM_increase_reservation:
kaf24@6486 146 case XENMEM_decrease_reservation:
kaf24@6486 147 if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
kaf24@6486 148 return -EFAULT;
kaf24@6486 149
kaf24@6486 150 start_extent = cmd >> START_EXTENT_SHIFT;
kaf24@6486 151 if ( unlikely(start_extent > reservation.nr_extents) )
kaf24@6486 152 return -EINVAL;
kaf24@6486 153
kaf24@6486 154 if ( reservation.extent_start != NULL )
kaf24@6486 155 reservation.extent_start += start_extent;
kaf24@6486 156 reservation.nr_extents -= start_extent;
kaf24@6486 157
kaf24@6701 158 if ( (reservation.address_bits != 0) &&
kaf24@6702 159 (reservation.address_bits <
kaf24@6702 160 (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kaf24@6486 161 {
kaf24@6486 162 if ( reservation.address_bits < 31 )
kaf24@6486 163 return -ENOMEM;
kaf24@6486 164 flags = ALLOC_DOM_DMA;
kaf24@6486 165 }
kaf24@6486 166
kaf24@6486 167 if ( likely(reservation.domid == DOMID_SELF) )
kaf24@6486 168 d = current->domain;
kaf24@6486 169 else if ( !IS_PRIV(current->domain) )
kaf24@6486 170 return -EPERM;
kaf24@6486 171 else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
kaf24@6486 172 return -ESRCH;
kaf24@6486 173
kaf24@6486 174 rc = ((op == XENMEM_increase_reservation) ?
kaf24@6486 175 increase_reservation : decrease_reservation)(
kaf24@6486 176 d,
kaf24@6486 177 reservation.extent_start,
kaf24@6486 178 reservation.nr_extents,
kaf24@6486 179 reservation.extent_order,
kaf24@6607 180 flags,
kaf24@6607 181 &preempted);
kaf24@6486 182
kaf24@6486 183 if ( unlikely(reservation.domid != DOMID_SELF) )
kaf24@6486 184 put_domain(d);
kaf24@6486 185
kaf24@6486 186 rc += start_extent;
kaf24@6486 187
kaf24@6607 188 if ( preempted )
kaf24@6486 189 return hypercall2_create_continuation(
kaf24@6607 190 __HYPERVISOR_memory_op, op | (rc << START_EXTENT_SHIFT), arg);
kaf24@6607 191
kaf24@6486 192 break;
kaf24@6486 193
kaf24@6486 194 case XENMEM_maximum_ram_page:
kaf24@7959 195 rc = max_page;
kaf24@7959 196 break;
kaf24@7959 197
kaf24@7959 198 case XENMEM_current_reservation:
kaf24@7959 199 case XENMEM_maximum_reservation:
kaf24@7959 200 if ( get_user(domid, (domid_t *)arg) )
kaf24@6486 201 return -EFAULT;
kaf24@7959 202
kaf24@7959 203 if ( likely((domid = (unsigned long)arg) == DOMID_SELF) )
kaf24@7959 204 d = current->domain;
kaf24@7959 205 else if ( !IS_PRIV(current->domain) )
kaf24@7959 206 return -EPERM;
kaf24@7959 207 else if ( (d = find_domain_by_id(domid)) == NULL )
kaf24@7959 208 return -ESRCH;
kaf24@7959 209
kaf24@7959 210 rc = (op == XENMEM_current_reservation) ? d->tot_pages : d->max_pages;
kaf24@7959 211
kaf24@7959 212 if ( unlikely(domid != DOMID_SELF) )
kaf24@7959 213 put_domain(d);
kaf24@7959 214
kaf24@6486 215 break;
kaf24@6486 216
kaf24@6486 217 default:
kaf24@8059 218 rc = arch_memory_op(op, arg);
kaf24@6486 219 break;
kaf24@6486 220 }
kaf24@6486 221
kaf24@6486 222 return rc;
kaf24@6486 223 }
kaf24@6486 224
kaf24@6486 225 /*
kaf24@6486 226 * Local variables:
kaf24@6486 227 * mode: C
kaf24@6486 228 * c-set-style: "BSD"
kaf24@6486 229 * c-basic-offset: 4
kaf24@6486 230 * tab-width: 4
kaf24@6486 231 * indent-tabs-mode: nil
kaf24@6486 232 * End:
kaf24@6486 233 */