ia64/xen-unstable

annotate xen/common/memory.c @ 8726:0c94043f5c5b

Rename physical-address-related variables and functions
to follow a new consistent naming scheme.

gpfn is a guest pseudophys frame number.
gmfn is a machine frame number (from guest p.o.v.).
mfn is a real bona fide machine number.
pfn is an arbitrary frame number (used in general-purpose
'polymorphic' functions).

pfn_info now called page_info.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Feb 01 16:28:50 2006 +0100 (2006-02-01)
parents ce057aa33cad
children 8aeb417387ca
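
The naming scheme above maps directly onto the translation helpers used in the
listing below: gmfn_to_mfn() resolves a guest-visible machine frame number to a
real machine frame, while page_to_mfn()/mfn_to_page() convert between machine
frames and their struct page_info descriptors. As a minimal illustrative sketch
(not part of the changeset; sketch_lookup is a made-up name, and the helper
signatures are assumed from their uses in this file):

    /* Sketch: resolve a guest machine frame number to its page descriptor.
     * 'd' is the target domain; 'gmfn' is the frame number the guest passed in. */
    static struct page_info *sketch_lookup(struct domain *d, unsigned long gmfn)
    {
        unsigned long mfn = gmfn_to_mfn(d, gmfn); /* guest frame -> real machine frame */
        if ( mfn >= max_page )                    /* reject frames beyond physical RAM */
            return NULL;
        return mfn_to_page(mfn);                  /* machine frame -> page_info */
    }

This mirrors the lookup pattern in decrease_reservation() below.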
rev   line source
kaf24@6486 1 /******************************************************************************
kaf24@6486 2 * memory.c
kaf24@6486 3 *
kaf24@6486 4 * Code to handle memory-related requests.
kaf24@6486 5 *
kaf24@6486 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6486 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6486 8 */
kaf24@6486 9
kaf24@6486 10 #include <xen/config.h>
kaf24@6486 11 #include <xen/types.h>
kaf24@6486 12 #include <xen/lib.h>
kaf24@6486 13 #include <xen/mm.h>
kaf24@6486 14 #include <xen/perfc.h>
kaf24@6486 15 #include <xen/sched.h>
kaf24@6486 16 #include <xen/event.h>
kaf24@6486 17 #include <xen/shadow.h>
kaf24@8468 18 #include <xen/iocap.h>
kaf24@6486 19 #include <asm/current.h>
kaf24@6486 20 #include <asm/hardirq.h>
kaf24@6486 21 #include <public/memory.h>
kaf24@6486 22
kaf24@6486 23 static long
kaf24@6486 24 increase_reservation(
kaf24@6486 25 struct domain *d,
kaf24@6486 26 unsigned long *extent_list,
kaf24@6486 27 unsigned int nr_extents,
kaf24@6486 28 unsigned int extent_order,
kaf24@6607 29 unsigned int flags,
kaf24@6607 30 int *preempted)
kaf24@6486 31 {
kaf24@8726 32 struct page_info *page;
kaf24@8673 33 unsigned long i;
kaf24@6486 34
kaf24@6701 35 if ( (extent_list != NULL) &&
kaf24@6701 36 !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
kaf24@6486 37 return 0;
kaf24@6486 38
kaf24@8468 39 if ( (extent_order != 0) &&
kaf24@8468 40 !multipage_allocation_permitted(current->domain) )
kaf24@6486 41 return 0;
kaf24@6486 42
kaf24@6486 43 for ( i = 0; i < nr_extents; i++ )
kaf24@6486 44 {
kaf24@6486 45 if ( hypercall_preempt_check() )
kaf24@6607 46 {
kaf24@6607 47 *preempted = 1;
kaf24@6486 48 return i;
kaf24@6607 49 }
kaf24@6486 50
kaf24@6486 51 if ( unlikely((page = alloc_domheap_pages(
kaf24@6486 52 d, extent_order, flags)) == NULL) )
kaf24@6486 53 {
kaf24@6752 54 DPRINTK("Could not allocate order=%d extent: "
kaf24@8673 55 "id=%d flags=%x (%ld of %d)\n",
kaf24@6752 56 extent_order, d->domain_id, flags, i, nr_extents);
kaf24@6486 57 return i;
kaf24@6486 58 }
kaf24@6486 59
kaf24@6486 60 /* Inform the domain of the new page's machine address. */
kaf24@6701 61 if ( (extent_list != NULL) &&
kaf24@8726 62 (__put_user(page_to_mfn(page), &extent_list[i]) != 0) )
kaf24@6486 63 return i;
kaf24@6486 64 }
kaf24@6486 65
kaf24@6486 66 return nr_extents;
kaf24@6486 67 }
sos22@8688 68
kaf24@6486 69 static long
kaf24@8673 70 populate_physmap(
kaf24@8673 71 struct domain *d,
kaf24@8673 72 unsigned long *extent_list,
kaf24@8673 73 unsigned int nr_extents,
kaf24@8673 74 unsigned int extent_order,
kaf24@8673 75 unsigned int flags,
kaf24@8673 76 int *preempted)
kaf24@8673 77 {
kaf24@8726 78 struct page_info *page;
kaf24@8694 79 unsigned long i, j, pfn, mfn;
kaf24@8673 80
kaf24@8673 81 if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
kaf24@8673 82 return 0;
kaf24@8673 83
kaf24@8673 84 if ( (extent_order != 0) &&
kaf24@8673 85 !multipage_allocation_permitted(current->domain) )
kaf24@8673 86 return 0;
kaf24@8673 87
kaf24@8673 88 for ( i = 0; i < nr_extents; i++ )
kaf24@8673 89 {
kaf24@8673 90 if ( hypercall_preempt_check() )
kaf24@8673 91 {
kaf24@8673 92 *preempted = 1;
sos22@8688 93 goto out;
kaf24@8673 94 }
kaf24@8673 95
kaf24@8673 96 if ( unlikely((page = alloc_domheap_pages(
kaf24@8673 97 d, extent_order, flags)) == NULL) )
kaf24@8673 98 {
kaf24@8673 99 DPRINTK("Could not allocate order=%d extent: "
kaf24@8673 100 "id=%d flags=%x (%ld of %d)\n",
kaf24@8673 101 extent_order, d->domain_id, flags, i, nr_extents);
sos22@8688 102 goto out;
kaf24@8673 103 }
kaf24@8673 104
kaf24@8726 105 mfn = page_to_mfn(page);
kaf24@8673 106
kaf24@8673 107 if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
sos22@8688 108 goto out;
kaf24@8673 109
kaf24@8694 110 if ( unlikely(shadow_mode_translate(d)) )
kaf24@8694 111 {
kaf24@8694 112 for ( j = 0; j < (1 << extent_order); j++ )
kaf24@8694 113 guest_physmap_add_page(d, pfn + j, mfn + j);
sos22@8688 114 }
kaf24@8694 115 else
kaf24@8694 116 {
kaf24@8694 117 for ( j = 0; j < (1 << extent_order); j++ )
kaf24@8694 118 set_pfn_from_mfn(mfn + j, pfn + j);
kaf24@8673 119
sos22@8688 120 /* Inform the domain of the new page's machine address. */
sos22@8688 121 if ( __put_user(mfn, &extent_list[i]) != 0 )
sos22@8688 122 goto out;
sos22@8688 123 }
kaf24@8673 124 }
kaf24@8673 125
sos22@8688 126 out:
sos22@8688 127 return i;
kaf24@8673 128 }
kaf24@8673 129
kaf24@8673 130 static long
kaf24@6486 131 decrease_reservation(
kaf24@6486 132 struct domain *d,
kaf24@6486 133 unsigned long *extent_list,
kaf24@6486 134 unsigned int nr_extents,
kaf24@6486 135 unsigned int extent_order,
kaf24@6607 136 unsigned int flags,
kaf24@6607 137 int *preempted)
kaf24@6486 138 {
kaf24@8726 139 struct page_info *page;
kaf24@8726 140 unsigned long i, j, gmfn, mfn;
kaf24@6486 141
kaf24@6486 142 if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
kaf24@6486 143 return 0;
kaf24@6486 144
kaf24@6486 145 for ( i = 0; i < nr_extents; i++ )
kaf24@6486 146 {
kaf24@6486 147 if ( hypercall_preempt_check() )
kaf24@6607 148 {
kaf24@6607 149 *preempted = 1;
kaf24@6486 150 return i;
kaf24@6607 151 }
kaf24@6486 152
kaf24@8726 153 if ( unlikely(__get_user(gmfn, &extent_list[i]) != 0) )
kaf24@6486 154 return i;
kaf24@6486 155
kaf24@6486 156 for ( j = 0; j < (1 << extent_order); j++ )
kaf24@6486 157 {
kaf24@8726 158 mfn = gmfn_to_mfn(d, gmfn + j);
sos22@8682 159 if ( unlikely(mfn >= max_page) )
kaf24@6486 160 {
kaf24@8694 161 DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
kaf24@8694 162 d->domain_id, mfn, max_page);
kaf24@6486 163 return i;
kaf24@6486 164 }
kaf24@6486 165
kaf24@8726 166 page = mfn_to_page(mfn);
kaf24@6486 167 if ( unlikely(!get_page(page, d)) )
kaf24@6486 168 {
kaf24@6486 169 DPRINTK("Bad page free for domain %u\n", d->domain_id);
kaf24@6486 170 return i;
kaf24@6486 171 }
kaf24@6486 172
kaf24@6486 173 if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
kaf24@6486 174 put_page_and_type(page);
kaf24@6486 175
kaf24@6486 176 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
kaf24@6486 177 put_page(page);
kaf24@6486 178
kaf24@8726 179 guest_physmap_remove_page(d, gmfn + j, mfn);
kaf24@8694 180
kaf24@6486 181 put_page(page);
kaf24@6486 182 }
kaf24@6486 183 }
kaf24@6486 184
kaf24@6486 185 return nr_extents;
kaf24@6486 186 }
kaf24@6486 187
kaf24@6486 188 /*
kaf24@6486 189 * To allow safe resume of do_memory_op() after preemption, we need to know
kaf24@6486 190 * at what point in the page list to resume. For this purpose I steal the
kaf24@6486 191 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
kaf24@6486 192 */
kaf24@6486 193 #define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
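
As a worked example of this encoding (illustrative values only, assuming
XENMEM_increase_reservation is sub-op 0 as defined in public/memory.h):

    /* Resume XENMEM_increase_reservation at extent 100. */
    int cmd = (100 << START_EXTENT_SHIFT) | XENMEM_increase_reservation; /* == 1600 */
    int op  = cmd & ((1 << START_EXTENT_SHIFT) - 1);                     /* == 0    */
    int start_extent = cmd >> START_EXTENT_SHIFT;                        /* == 100  */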
kaf24@6486 194
kaf24@6486 195 long do_memory_op(int cmd, void *arg)
kaf24@6486 196 {
kaf24@6486 197 struct domain *d;
kaf24@6607 198 int rc, start_extent, op, flags = 0, preempted = 0;
kaf24@6486 199 struct xen_memory_reservation reservation;
kaf24@7959 200 domid_t domid;
kaf24@6486 201
kaf24@6486 202 op = cmd & ((1 << START_EXTENT_SHIFT) - 1);
kaf24@6486 203
kaf24@6486 204 switch ( op )
kaf24@6486 205 {
kaf24@6486 206 case XENMEM_increase_reservation:
kaf24@6486 207 case XENMEM_decrease_reservation:
kaf24@8673 208 case XENMEM_populate_physmap:
kaf24@6486 209 if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
kaf24@6486 210 return -EFAULT;
kaf24@6486 211
kaf24@6486 212 start_extent = cmd >> START_EXTENT_SHIFT;
kaf24@6486 213 if ( unlikely(start_extent > reservation.nr_extents) )
kaf24@6486 214 return -EINVAL;
kaf24@6486 215
kaf24@6486 216 if ( reservation.extent_start != NULL )
kaf24@6486 217 reservation.extent_start += start_extent;
kaf24@6486 218 reservation.nr_extents -= start_extent;
kaf24@6486 219
kaf24@6701 220 if ( (reservation.address_bits != 0) &&
kaf24@6702 221 (reservation.address_bits <
kaf24@6702 222 (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kaf24@6486 223 {
kaf24@6486 224 if ( reservation.address_bits < 31 )
kaf24@6486 225 return -ENOMEM;
kaf24@6486 226 flags = ALLOC_DOM_DMA;
kaf24@6486 227 }
kaf24@6486 228
kaf24@6486 229 if ( likely(reservation.domid == DOMID_SELF) )
kaf24@6486 230 d = current->domain;
kaf24@6486 231 else if ( !IS_PRIV(current->domain) )
kaf24@6486 232 return -EPERM;
kaf24@6486 233 else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
kaf24@6486 234 return -ESRCH;
kaf24@6486 235
kaf24@8673 236 switch ( op )
kaf24@8673 237 {
kaf24@8673 238 case XENMEM_increase_reservation:
kaf24@8673 239 rc = increase_reservation(
kaf24@8673 240 d,
kaf24@8673 241 reservation.extent_start,
kaf24@8673 242 reservation.nr_extents,
kaf24@8673 243 reservation.extent_order,
kaf24@8673 244 flags,
kaf24@8673 245 &preempted);
kaf24@8673 246 break;
kaf24@8673 247 case XENMEM_decrease_reservation:
kaf24@8673 248 rc = decrease_reservation(
kaf24@8673 249 d,
kaf24@8673 250 reservation.extent_start,
kaf24@8673 251 reservation.nr_extents,
kaf24@8673 252 reservation.extent_order,
kaf24@8673 253 flags,
kaf24@8673 254 &preempted);
kaf24@8673 255 break;
kaf24@8673 256 case XENMEM_populate_physmap:
kaf24@8673 257 default:
kaf24@8673 258 rc = populate_physmap(
kaf24@8673 259 d,
kaf24@8673 260 reservation.extent_start,
kaf24@8673 261 reservation.nr_extents,
kaf24@8673 262 reservation.extent_order,
kaf24@8673 263 flags,
kaf24@8673 264 &preempted);
kaf24@8673 265 break;
kaf24@8673 266 }
kaf24@6486 267
kaf24@6486 268 if ( unlikely(reservation.domid != DOMID_SELF) )
kaf24@6486 269 put_domain(d);
kaf24@6486 270
kaf24@6486 271 rc += start_extent;
kaf24@6486 272
kaf24@6607 273 if ( preempted )
kaf24@6486 274 return hypercall2_create_continuation(
kaf24@6607 275 __HYPERVISOR_memory_op, op | (rc << START_EXTENT_SHIFT), arg);
kaf24@6607 276
kaf24@6486 277 break;
kaf24@6486 278
kaf24@6486 279 case XENMEM_maximum_ram_page:
kaf24@7959 280 rc = max_page;
kaf24@7959 281 break;
kaf24@7959 282
kaf24@7959 283 case XENMEM_current_reservation:
kaf24@7959 284 case XENMEM_maximum_reservation:
kaf24@7959 285 if ( get_user(domid, (domid_t *)arg) )
kaf24@6486 286 return -EFAULT;
kaf24@7959 287
kaf24@7959 288 if ( likely(domid == DOMID_SELF) )
kaf24@7959 289 d = current->domain;
kaf24@7959 290 else if ( !IS_PRIV(current->domain) )
kaf24@7959 291 return -EPERM;
kaf24@7959 292 else if ( (d = find_domain_by_id(domid)) == NULL )
kaf24@7959 293 return -ESRCH;
kaf24@7959 294
kaf24@7959 295 rc = (op == XENMEM_current_reservation) ? d->tot_pages : d->max_pages;
kaf24@7959 296
kaf24@7959 297 if ( unlikely(domid != DOMID_SELF) )
kaf24@7959 298 put_domain(d);
kaf24@7959 299
kaf24@6486 300 break;
kaf24@6486 301
kaf24@6486 302 default:
kaf24@8059 303 rc = arch_memory_op(op, arg);
kaf24@6486 304 break;
kaf24@6486 305 }
kaf24@6486 306
kaf24@6486 307 return rc;
kaf24@6486 308 }
kaf24@6486 309
kaf24@6486 310 /*
kaf24@6486 311 * Local variables:
kaf24@6486 312 * mode: C
kaf24@6486 313 * c-set-style: "BSD"
kaf24@6486 314 * c-basic-offset: 4
kaf24@6486 315 * tab-width: 4
kaf24@6486 316 * indent-tabs-mode: nil
kaf24@6486 317 * End:
kaf24@6486 318 */