ia64/xen-unstable: xen/common/memory.c @ 6702:e3fd0fa58364

Rename get_order() to get_order_from_bytes() and add new function
get_order_from_pages(). Fix HYPERVISOR_memory_op(), properly this time.

Signed-off-by: Keir Fraser <keir@xensource.com>
author:   kaf24@firebug.cl.cam.ac.uk
date:     Thu Sep 08 17:25:52 2005 +0000 (2005-09-08)
parents:  c2705e74efba
children: 2704a88c3295 cdfa7dd00c44
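
The renamed helpers themselves live in the arch headers rather than in this
file. For context, here is a minimal sketch of their assumed semantics (an
illustration, not the actual Xen implementation): each returns the smallest
allocation order whose 1 << order pages cover the request. The source below
relies on get_order_from_pages(max_page) + PAGE_SHIFT as an approximation of
the number of address bits needed to reach the top of RAM, which is what
do_memory_op() compares reservation.address_bits against.

/* Sketch only: assumed semantics of the helpers named in the changeset. */
static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order = 0;

    nr_pages -= 1;              /* order 0 already covers a single page */
    while ( nr_pages != 0 )
    {
        nr_pages >>= 1;
        order++;
    }
    return order;               /* i.e. ceil(log2(nr_pages)) */
}

static inline int get_order_from_bytes(unsigned long size)
{
    /* Round the byte count up to whole pages, then reuse the page version. */
    return get_order_from_pages((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
}
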
/******************************************************************************
 * memory.c
 *
 * Code to handle memory-related requests.
 *
 * Copyright (c) 2003-2004, B Dragovic
 * Copyright (c) 2003-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/shadow.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <public/memory.h>

static long
increase_reservation(
    struct domain *d,
    unsigned long *extent_list,
    unsigned int   nr_extents,
    unsigned int   extent_order,
    unsigned int   flags,
    int           *preempted)
{
    struct pfn_info *page;
    unsigned long    i;

    if ( (extent_list != NULL) &&
         !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
        return 0;

    if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
    {
        DPRINTK("Only I/O-capable domains may allocate > order-0 memory.\n");
        return 0;
    }

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        if ( unlikely((page = alloc_domheap_pages(
            d, extent_order, flags)) == NULL) )
        {
            DPRINTK("Could not allocate order=%d extent: id=%d flags=%x\n",
                    extent_order, d->domain_id, flags);
            return i;
        }

        /* Inform the domain of the new page's machine address. */
        if ( (extent_list != NULL) &&
             (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
            return i;
    }

    return nr_extents;
}

static long
decrease_reservation(
    struct domain *d,
    unsigned long *extent_list,
    unsigned int   nr_extents,
    unsigned int   extent_order,
    unsigned int   flags,
    int           *preempted)
{
    struct pfn_info *page;
    unsigned long    i, j, mpfn;

    if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
        return 0;

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
            return i;

        for ( j = 0; j < (1 << extent_order); j++ )
        {
            if ( unlikely((mpfn + j) >= max_page) )
            {
                DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
                        d->domain_id, mpfn + j, max_page);
                return i;
            }

            page = &frame_table[mpfn + j];
            if ( unlikely(!get_page(page, d)) )
            {
                DPRINTK("Bad page free for domain %u\n", d->domain_id);
                return i;
            }

            if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
                put_page_and_type(page);

            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                put_page(page);

            shadow_sync_and_drop_references(d, page);

            put_page(page);
        }
    }

    return nr_extents;
}

/*
 * To allow safe resume of do_memory_op() after preemption, we need to know
 * at what point in the page list to resume. For this purpose I steal the
 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
 */
#define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
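
/*
 * (Illustrative note, not part of the original file.) With a shift of 4, a
 * preempted call that should resume at extent N re-enters the hypervisor as
 *     cmd = op | (N << START_EXTENT_SHIFT)
 * so the low four bits still decode to the sub-command and the remaining
 * bits to the resume point, exactly as decoded at the top of do_memory_op()
 * below and re-encoded by hypercall2_create_continuation().
 */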

long do_memory_op(int cmd, void *arg)
{
    struct domain *d;
    int rc, start_extent, op, flags = 0, preempted = 0;
    struct xen_memory_reservation reservation;

    op = cmd & ((1 << START_EXTENT_SHIFT) - 1);

    switch ( op )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
        if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
            return -EFAULT;

        start_extent = cmd >> START_EXTENT_SHIFT;
        if ( unlikely(start_extent > reservation.nr_extents) )
            return -EINVAL;

        if ( reservation.extent_start != NULL )
            reservation.extent_start += start_extent;
        reservation.nr_extents -= start_extent;

        if ( (reservation.address_bits != 0) &&
             (reservation.address_bits <
              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
        {
            if ( reservation.address_bits < 31 )
                return -ENOMEM;
            flags = ALLOC_DOM_DMA;
        }

        if ( likely(reservation.domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) )
            return -EPERM;
        else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
            return -ESRCH;

        rc = ((op == XENMEM_increase_reservation) ?
              increase_reservation : decrease_reservation)(
            d,
            reservation.extent_start,
            reservation.nr_extents,
            reservation.extent_order,
            flags,
            &preempted);

        if ( unlikely(reservation.domid != DOMID_SELF) )
            put_domain(d);

        rc += start_extent;

        if ( preempted )
            return hypercall2_create_continuation(
                __HYPERVISOR_memory_op, op | (rc << START_EXTENT_SHIFT), arg);

        break;

    case XENMEM_maximum_ram_page:
        if ( put_user(max_page, (unsigned long *)arg) )
            return -EFAULT;
        rc = 0;
        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
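
For orientation, a hedged sketch of how a guest might invoke this handler.
The reservation fields mirror the ones do_memory_op() reads above;
HYPERVISOR_memory_op() stands for the guest-side hypercall wrapper, and both
its exact prototype and the helper name grow_reservation_by() are assumptions
made for this illustration, not part of the file above.

/* Guest-side sketch (assumed wrapper; not part of xen/common/memory.c). */
#include <public/memory.h>

static long grow_reservation_by(unsigned long nr_pages,
                                unsigned long *mfn_list /* filled on return */)
{
    struct xen_memory_reservation reservation = {
        .extent_start = mfn_list,   /* may be NULL if MFNs are not wanted   */
        .nr_extents   = nr_pages,   /* number of order-0 extents requested  */
        .extent_order = 0,          /* >0 requires an I/O-capable domain    */
        .address_bits = 0,          /* 0: no addressability restriction     */
        .domid        = DOMID_SELF, /* other domids require IS_PRIV()       */
    };

    /* Returns the number of extents actually allocated (may be < nr_pages). */
    return HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
}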