ia64/xen-unstable

xen/common/memory.c @ 9776:72f9c751d3ea

Replace &foo[0] with foo where the latter seems cleaner
(which is usually the case, and particularly when it's an
argument to one of the bitops functions).

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Apr 19 18:32:20 2006 +0100
parents fbeb0a5b7219
children 4e1b8be54311
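A minimal sketch of the cleanup this changeset applies (the call site below is hypothetical, not taken from the diff): for any array foo, the expressions &foo[0] and foo decay to the same pointer to the first element, so the longer spelling adds only noise. For example:

    /* before */
    set_bit(nr, &d->pirq_mask[0]);

    /* after */
    set_bit(nr, d->pirq_mask);

Both calls pass identical pointers; the shorter form reads as "the mask" rather than "the address of its first entry", which is why it is preferred for bitops arguments.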
/******************************************************************************
 * memory.c
 *
 * Code to handle memory-related requests.
 *
 * Copyright (c) 2003-2004, B Dragovic
 * Copyright (c) 2003-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/shadow.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <public/memory.h>

/*
 * To allow safe resume of do_memory_op() after preemption, we need to know
 * at what point in the page list to resume. For this purpose I steal the
 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
 */
#define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
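/*
 * Illustrative sketch, not part of the original source: how @cmd is
 * unpacked and re-packed under this scheme. The names 'done' and
 * 'new_cmd' are hypothetical.
 *
 *   op           = cmd & ((1UL << START_EXTENT_SHIFT) - 1);
 *   start_extent = cmd >> START_EXTENT_SHIFT;
 *
 *   // When preempted after 'done' extents, a continuation re-packs both
 *   // fields so that the retried hypercall resumes where it left off:
 *   new_cmd = op | (done << START_EXTENT_SHIFT);
 *
 * Only the low four bits carry the sub-command, so nr_extents must fit in
 * the remaining bits; that is what the (ULONG_MAX >> START_EXTENT_SHIFT)
 * checks below enforce.
 */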
static long
increase_reservation(
    struct domain *d,
    GUEST_HANDLE(ulong) extent_list,
    unsigned int nr_extents,
    unsigned int extent_order,
    unsigned int flags,
    int *preempted)
{
    struct page_info *page;
    unsigned long i, mfn;

    if ( !guest_handle_is_null(extent_list) &&
         !guest_handle_okay(extent_list, nr_extents) )
        return 0;

    if ( (extent_order != 0) &&
         !multipage_allocation_permitted(current->domain) )
        return 0;

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        if ( unlikely((page = alloc_domheap_pages(
            d, extent_order, flags)) == NULL) )
        {
            DPRINTK("Could not allocate order=%u extent: "
                    "id=%d flags=%x (%lu of %u)\n",
                    extent_order, d->domain_id, flags, i, nr_extents);
            return i;
        }

        /* Inform the domain of the new page's machine address. */
        if ( !guest_handle_is_null(extent_list) )
        {
            mfn = page_to_mfn(page);
            if ( unlikely(__copy_to_guest_offset(extent_list, i, &mfn, 1)) )
                return i;
        }
    }

    return nr_extents;
}
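/*
 * Illustrative guest-side sketch, not part of this file: how a balloon
 * driver might invoke this path for a single order-0 extent. The array
 * name 'frame_list' is hypothetical; set_xen_guest_handle() and
 * HYPERVISOR_memory_op() are the usual guest wrappers for filling a
 * guest handle and issuing the hypercall.
 *
 *   unsigned long frame_list[1];
 *   struct xen_memory_reservation reservation = {
 *       .nr_extents   = 1,
 *       .extent_order = 0,
 *       .address_bits = 0,
 *       .domid        = DOMID_SELF,
 *   };
 *   set_xen_guest_handle(reservation.extent_start, frame_list);
 *   rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
 *   // On success rc counts the extents allocated, and frame_list[0]
 *   // holds the machine frame number of the new page.
 */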
static long
populate_physmap(
    struct domain *d,
    GUEST_HANDLE(ulong) extent_list,
    unsigned int nr_extents,
    unsigned int extent_order,
    unsigned int flags,
    int *preempted)
{
    struct page_info *page;
    unsigned long i, j, gpfn, mfn;

    if ( !guest_handle_okay(extent_list, nr_extents) )
        return 0;

    if ( (extent_order != 0) &&
         !multipage_allocation_permitted(current->domain) )
        return 0;

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, extent_list, i, 1)) )
            goto out;

        if ( unlikely((page = alloc_domheap_pages(
            d, extent_order, flags)) == NULL) )
        {
            DPRINTK("Could not allocate order=%u extent: "
                    "id=%d flags=%x (%lu of %u)\n",
                    extent_order, d->domain_id, flags, i, nr_extents);
            goto out;
        }

        mfn = page_to_mfn(page);

        if ( unlikely(shadow_mode_translate(d)) )
        {
            for ( j = 0; j < (1 << extent_order); j++ )
                guest_physmap_add_page(d, gpfn + j, mfn + j);
        }
        else
        {
            for ( j = 0; j < (1 << extent_order); j++ )
                set_gpfn_from_mfn(mfn + j, gpfn + j);

            /* Inform the domain of the new page's machine address. */
            if ( unlikely(__copy_to_guest_offset(extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    return i;
}
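/*
 * Illustrative note, not part of this file: the extent_list contract for
 * XENMEM_populate_physmap. On entry each slot names a guest frame (gpfn)
 * to populate; the write-back differs by mode. Names are hypothetical.
 *
 *   unsigned long frames[2] = { 0x100, 0x101 };   // gpfns to populate
 *   // ... issue the hypercall as in the sketch above ...
 *   // For a non-translated guest, frames[i] is overwritten with the mfn
 *   // now backing gpfn 0x100 + i (via __copy_to_guest_offset() above).
 *   // For a shadow-translate guest the array is left untouched, since
 *   // guest_physmap_add_page() installs the gpfn->mfn mapping directly.
 */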
int
guest_remove_page(
    struct domain *d,
    unsigned long gmfn)
{
    struct page_info *page;
    unsigned long mfn;

    mfn = gmfn_to_mfn(d, gmfn);
    if ( unlikely(!mfn_valid(mfn)) )
    {
        DPRINTK("Domain %u page number %lx invalid\n",
                d->domain_id, mfn);
        return 0;
    }

    page = mfn_to_page(mfn);
    if ( unlikely(!get_page(page, d)) )
    {
        DPRINTK("Bad page free for domain %u\n", d->domain_id);
        return 0;
    }

    if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
        put_page_and_type(page);

    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    guest_physmap_remove_page(d, gmfn, mfn);

    put_page(page);

    return 1;
}

static long
decrease_reservation(
    struct domain *d,
    GUEST_HANDLE(ulong) extent_list,
    unsigned int nr_extents,
    unsigned int extent_order,
    unsigned int flags,
    int *preempted)
{
    unsigned long i, j, gmfn;

    if ( !guest_handle_okay(extent_list, nr_extents) )
        return 0;

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        if ( unlikely(__copy_from_guest_offset(&gmfn, extent_list, i, 1)) )
            return i;

        for ( j = 0; j < (1 << extent_order); j++ )
        {
            if ( !guest_remove_page(d, gmfn + j) )
                return i;
        }
    }

    return nr_extents;
}
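/*
 * Illustrative guest-side sketch, not part of this file: ballooning a
 * page out, the mirror of the increase_reservation() example above. Here
 * the extent_list is input-only -- each slot names a guest frame to
 * release. The variable 'gmfn' is hypothetical.
 *
 *   unsigned long gmfn = ...;   // guest frame the balloon driver gave up
 *   struct xen_memory_reservation reservation = {
 *       .nr_extents   = 1,
 *       .extent_order = 0,
 *       .domid        = DOMID_SELF,
 *   };
 *   set_xen_guest_handle(reservation.extent_start, &gmfn);
 *   rc = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
 */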
static long
translate_gpfn_list(
    GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
{
    struct xen_translate_gpfn_list op;
    unsigned long i, gpfn, mfn;
    struct domain *d;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    /* Is size too large for us to encode a continuation? */
    if ( op.nr_gpfns > (ULONG_MAX >> START_EXTENT_SHIFT) )
        return -EINVAL;

    if ( !guest_handle_okay(op.gpfn_list, op.nr_gpfns) ||
         !guest_handle_okay(op.mfn_list, op.nr_gpfns) )
        return -EFAULT;

    if ( op.domid == DOMID_SELF )
        op.domid = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(op.domid)) == NULL )
        return -ESRCH;

    if ( !shadow_mode_translate(d) )
    {
        put_domain(d);
        return -EINVAL;
    }

    for ( i = *progress; i < op.nr_gpfns; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            put_domain(d);
            *progress = i;
            return -EAGAIN;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, op.gpfn_list, i, 1)) )
        {
            put_domain(d);
            return -EFAULT;
        }

        mfn = gmfn_to_mfn(d, gpfn);

        if ( unlikely(__copy_to_guest_offset(op.mfn_list, i, &mfn, 1)) )
        {
            put_domain(d);
            return -EFAULT;
        }
    }

    put_domain(d);
    return 0;
}
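/*
 * Illustrative caller-side sketch, not part of this file: a privileged
 * domain translating gpfns of a shadow-translate guest to mfns. Array
 * names and N are hypothetical.
 *
 *   unsigned long gpfns[N], mfns[N];
 *   struct xen_translate_gpfn_list op = {
 *       .domid    = target_domid,
 *       .nr_gpfns = N,
 *   };
 *   set_xen_guest_handle(op.gpfn_list, gpfns);
 *   set_xen_guest_handle(op.mfn_list, mfns);
 *   rc = HYPERVISOR_memory_op(XENMEM_translate_gpfn_list, &op);
 *   // rc == 0 on success, with mfns[i] the translation of gpfns[i].
 *   // The -EAGAIN path above never reaches callers: do_memory_op()
 *   // converts it into a hypercall continuation.
 */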
long do_memory_op(unsigned long cmd, GUEST_HANDLE(void) arg)
{
    struct domain *d;
    int rc, op, flags = 0, preempted = 0;
    unsigned long start_extent, progress;
    struct xen_memory_reservation reservation;
    domid_t domid;

    op = cmd & ((1 << START_EXTENT_SHIFT) - 1);

    switch ( op )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        if ( copy_from_guest(&reservation, arg, 1) )
            return -EFAULT;

        /* Is size too large for us to encode a continuation? */
        if ( reservation.nr_extents > (ULONG_MAX >> START_EXTENT_SHIFT) )
            return -EINVAL;

        start_extent = cmd >> START_EXTENT_SHIFT;
        if ( unlikely(start_extent > reservation.nr_extents) )
            return -EINVAL;

        if ( !guest_handle_is_null(reservation.extent_start) )
            guest_handle_add_offset(reservation.extent_start, start_extent);
        reservation.nr_extents -= start_extent;

        if ( (reservation.address_bits != 0) &&
             (reservation.address_bits <
              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
        {
            if ( reservation.address_bits < 31 )
                return -ENOMEM;
            flags = ALLOC_DOM_DMA;
        }

        if ( likely(reservation.domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) )
            return -EPERM;
        else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
            return -ESRCH;

        switch ( op )
        {
        case XENMEM_increase_reservation:
            rc = increase_reservation(
                d,
                reservation.extent_start,
                reservation.nr_extents,
                reservation.extent_order,
                flags,
                &preempted);
            break;
        case XENMEM_decrease_reservation:
            rc = decrease_reservation(
                d,
                reservation.extent_start,
                reservation.nr_extents,
                reservation.extent_order,
                flags,
                &preempted);
            break;
        case XENMEM_populate_physmap:
        default:
            rc = populate_physmap(
                d,
                reservation.extent_start,
                reservation.nr_extents,
                reservation.extent_order,
                flags,
                &preempted);
            break;
        }

        if ( unlikely(reservation.domid != DOMID_SELF) )
            put_domain(d);

        rc += start_extent;

        if ( preempted )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh",
                op | (rc << START_EXTENT_SHIFT), arg);

        break;

    case XENMEM_maximum_ram_page:
        rc = max_page;
        break;

    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
        if ( copy_from_guest(&domid, arg, 1) )
            return -EFAULT;

        if ( likely(domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) )
            return -EPERM;
        else if ( (d = find_domain_by_id(domid)) == NULL )
            return -ESRCH;

        rc = (op == XENMEM_current_reservation) ? d->tot_pages : d->max_pages;

        if ( unlikely(domid != DOMID_SELF) )
            put_domain(d);

        break;

    case XENMEM_translate_gpfn_list:
        progress = cmd >> START_EXTENT_SHIFT;
        rc = translate_gpfn_list(
            guest_handle_cast(arg, xen_translate_gpfn_list_t),
            &progress);
        if ( rc == -EAGAIN )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh",
                op | (progress << START_EXTENT_SHIFT), arg);
        break;

    default:
        rc = arch_memory_op(op, arg);
        break;
    }

    return rc;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */