ia64/xen-unstable

view xen/common/memory.c @ 8581:4520b451a70e

Quieten debug printing on memory_op hypercall. Don't warn
about disallowed multipage allocation attempts.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Jan 12 15:44:44 2006 +0100 (2006-01-12)
parents d966b7a00959
children 17dc21008351 1580009f137c
line source
/******************************************************************************
 * memory.c
 *
 * Code to handle memory-related requests.
 *
 * Copyright (c) 2003-2004, B Dragovic
 * Copyright (c) 2003-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/shadow.h>
#include <xen/iocap.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <public/memory.h>

static long
increase_reservation(
    struct domain *d,
    unsigned long *extent_list,
    unsigned int   nr_extents,
    unsigned int   extent_order,
    unsigned int   flags,
    int           *preempted)
{
    struct pfn_info *page;
    unsigned int     i;

    if ( (extent_list != NULL) &&
         !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
        return 0;

    if ( (extent_order != 0) &&
         !multipage_allocation_permitted(current->domain) )
        return 0;

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        if ( unlikely((page = alloc_domheap_pages(
            d, extent_order, flags)) == NULL) )
        {
            DPRINTK("Could not allocate order=%d extent: "
                    "id=%d flags=%x (%d of %d)\n",
                    extent_order, d->domain_id, flags, i, nr_extents);
            return i;
        }

        /* Inform the domain of the new page's machine address. */
        if ( (extent_list != NULL) &&
             (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
            return i;
    }

    return nr_extents;
}

static long
decrease_reservation(
    struct domain *d,
    unsigned long *extent_list,
    unsigned int   nr_extents,
    unsigned int   extent_order,
    unsigned int   flags,
    int           *preempted)
{
    struct pfn_info *page;
    unsigned long    i, j, mpfn;

    if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
        return 0;

    for ( i = 0; i < nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            *preempted = 1;
            return i;
        }

        if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
            return i;

        for ( j = 0; j < (1 << extent_order); j++ )
        {
            if ( unlikely((mpfn + j) >= max_page) )
            {
                DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
                        d->domain_id, mpfn + j, max_page);
                return i;
            }

            page = pfn_to_page(mpfn + j);
            if ( unlikely(!get_page(page, d)) )
            {
                DPRINTK("Bad page free for domain %u\n", d->domain_id);
                return i;
            }

            if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
                put_page_and_type(page);

            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                put_page(page);

            shadow_sync_and_drop_references(d, page);

            put_page(page);
        }
    }

    return nr_extents;
}

/*
 * To allow safe resume of do_memory_op() after preemption, we need to know
 * at what point in the page list to resume. For this purpose I steal the
 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
 */
#define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */

long do_memory_op(int cmd, void *arg)
{
    struct domain *d;
    int rc, start_extent, op, flags = 0, preempted = 0;
    struct xen_memory_reservation reservation;
    domid_t domid;

    op = cmd & ((1 << START_EXTENT_SHIFT) - 1);

    switch ( op )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
        if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
            return -EFAULT;

        start_extent = cmd >> START_EXTENT_SHIFT;
        if ( unlikely(start_extent > reservation.nr_extents) )
            return -EINVAL;

        if ( reservation.extent_start != NULL )
            reservation.extent_start += start_extent;
        reservation.nr_extents -= start_extent;

        if ( (reservation.address_bits != 0) &&
             (reservation.address_bits <
              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
        {
            if ( reservation.address_bits < 31 )
                return -ENOMEM;
            flags = ALLOC_DOM_DMA;
        }

        if ( likely(reservation.domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) )
            return -EPERM;
        else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
            return -ESRCH;

        rc = ((op == XENMEM_increase_reservation) ?
              increase_reservation : decrease_reservation)(
            d,
            reservation.extent_start,
            reservation.nr_extents,
            reservation.extent_order,
            flags,
            &preempted);

        if ( unlikely(reservation.domid != DOMID_SELF) )
            put_domain(d);

        rc += start_extent;

        if ( preempted )
            return hypercall2_create_continuation(
                __HYPERVISOR_memory_op, op | (rc << START_EXTENT_SHIFT), arg);

        break;

    case XENMEM_maximum_ram_page:
        rc = max_page;
        break;

    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
        if ( get_user(domid, (domid_t *)arg) )
            return -EFAULT;

        if ( likely(domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) )
            return -EPERM;
        else if ( (d = find_domain_by_id(domid)) == NULL )
            return -ESRCH;

        rc = (op == XENMEM_current_reservation) ? d->tot_pages : d->max_pages;

        if ( unlikely(domid != DOMID_SELF) )
            put_domain(d);

        break;

    default:
        rc = arch_memory_op(op, arg);
        break;
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
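
To illustrate the continuation scheme described in the comment above START_EXTENT_SHIFT: the op number keeps the low four bits of cmd, and the count of extents already processed occupies the otherwise-unused high bits. The sketch below is illustrative only and is not part of memory.c; the helper names and the MEMOP_CMD_MASK macro are hypothetical, while START_EXTENT_SHIFT and the shift/mask arithmetic mirror what do_memory_op and its hypercall2_create_continuation call actually do.

/* Illustrative sketch only -- not part of the file above. */
#define START_EXTENT_SHIFT 4                                /* cmd[:4] == start_extent */
#define MEMOP_CMD_MASK     ((1 << START_EXTENT_SHIFT) - 1)  /* hypothetical name */

/* Pack an op and a resume index into a single cmd word. */
static int memop_encode_cmd(int op, int start_extent)
{
    return op | (start_extent << START_EXTENT_SHIFT);
}

/* Recover the op and resume index on re-entry into do_memory_op(). */
static void memop_decode_cmd(int cmd, int *op, int *start_extent)
{
    *op           = cmd & MEMOP_CMD_MASK;
    *start_extent = cmd >> START_EXTENT_SHIFT;
}

When increase_reservation() or decrease_reservation() is preempted after handling some extents, do_memory_op() adds start_extent to that count and re-issues itself via hypercall2_create_continuation() with op | (total << START_EXTENT_SHIFT); on re-entry the start_extent is stripped back off cmd, extent_start is advanced and nr_extents reduced, so already-processed extents are not touched again.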