ia64/xen-unstable

annotate xen/common/memory.c @ 6607:ec11c5cca195

Fix preemption-check race in memory_op hypercall.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Sep 02 17:02:08 2005 +0000 (2005-09-02)
parents dd668f7527cb
children f27205ea60ef 29808fef9148
rev   line source
kaf24@6486 1 /******************************************************************************
kaf24@6486 2 * memory.c
kaf24@6486 3 *
kaf24@6486 4 * Code to handle memory-related requests.
kaf24@6486 5 *
kaf24@6486 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6486 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6486 8 */
kaf24@6486 9
kaf24@6486 10 #include <xen/config.h>
kaf24@6486 11 #include <xen/types.h>
kaf24@6486 12 #include <xen/lib.h>
kaf24@6486 13 #include <xen/mm.h>
kaf24@6486 14 #include <xen/perfc.h>
kaf24@6486 15 #include <xen/sched.h>
kaf24@6486 16 #include <xen/event.h>
kaf24@6486 17 #include <xen/shadow.h>
kaf24@6486 18 #include <asm/current.h>
kaf24@6486 19 #include <asm/hardirq.h>
kaf24@6486 20 #include <public/memory.h>
kaf24@6486 21
kaf24@6486 22 static long
kaf24@6486 23 increase_reservation(
kaf24@6486 24 struct domain *d,
kaf24@6486 25 unsigned long *extent_list,
kaf24@6486 26 unsigned int nr_extents,
kaf24@6486 27 unsigned int extent_order,
kaf24@6607 28 unsigned int flags,
kaf24@6607 29 int *preempted)
kaf24@6486 30 {
kaf24@6486 31 struct pfn_info *page;
kaf24@6486 32 unsigned long i;
kaf24@6486 33
kaf24@6486 34 if ( (extent_list != NULL)
kaf24@6486 35 && !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
kaf24@6486 36 return 0;
kaf24@6486 37
kaf24@6486 38 if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
kaf24@6486 39 {
kaf24@6486 40 DPRINTK("Only I/O-capable domains may allocate > order-0 memory.\n");
kaf24@6486 41 return 0;
kaf24@6486 42 }
kaf24@6486 43
kaf24@6486 44 for ( i = 0; i < nr_extents; i++ )
kaf24@6486 45 {
kaf24@6486 46 if ( hypercall_preempt_check() )
kaf24@6607 47 {
kaf24@6607 48 *preempted = 1;
kaf24@6486 49 return i;
kaf24@6607 50 }
kaf24@6486 51
kaf24@6486 52 if ( unlikely((page = alloc_domheap_pages(
kaf24@6486 53 d, extent_order, flags)) == NULL) )
kaf24@6486 54 {
kaf24@6486 55 DPRINTK("Could not allocate a frame\n");
kaf24@6486 56 return i;
kaf24@6486 57 }
kaf24@6486 58
kaf24@6486 59 /* Inform the domain of the new page's machine address. */
kaf24@6486 60 if ( (extent_list != NULL)
kaf24@6486 61 && (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
kaf24@6486 62 return i;
kaf24@6486 63 }
kaf24@6486 64
kaf24@6486 65 return nr_extents;
kaf24@6486 66 }
kaf24@6486 67
kaf24@6486 68 static long
kaf24@6486 69 decrease_reservation(
kaf24@6486 70 struct domain *d,
kaf24@6486 71 unsigned long *extent_list,
kaf24@6486 72 unsigned int nr_extents,
kaf24@6486 73 unsigned int extent_order,
kaf24@6607 74 unsigned int flags,
kaf24@6607 75 int *preempted)
kaf24@6486 76 {
kaf24@6486 77 struct pfn_info *page;
kaf24@6486 78 unsigned long i, j, mpfn;
kaf24@6486 79
kaf24@6486 80 if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
kaf24@6486 81 return 0;
kaf24@6486 82
kaf24@6486 83 for ( i = 0; i < nr_extents; i++ )
kaf24@6486 84 {
kaf24@6486 85 if ( hypercall_preempt_check() )
kaf24@6607 86 {
kaf24@6607 87 *preempted = 1;
kaf24@6486 88 return i;
kaf24@6607 89 }
kaf24@6486 90
kaf24@6486 91 if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
kaf24@6486 92 return i;
kaf24@6486 93
kaf24@6486 94 for ( j = 0; j < (1 << extent_order); j++ )
kaf24@6486 95 {
kaf24@6486 96 if ( unlikely((mpfn + j) >= max_page) )
kaf24@6486 97 {
kaf24@6486 98 DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
kaf24@6486 99 d->domain_id, mpfn + j, max_page);
kaf24@6486 100 return i;
kaf24@6486 101 }
kaf24@6486 102
kaf24@6486 103 page = &frame_table[mpfn + j];
kaf24@6486 104 if ( unlikely(!get_page(page, d)) )
kaf24@6486 105 {
kaf24@6486 106 DPRINTK("Bad page free for domain %u\n", d->domain_id);
kaf24@6486 107 return i;
kaf24@6486 108 }
kaf24@6486 109
kaf24@6486 110 if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
kaf24@6486 111 put_page_and_type(page);
kaf24@6486 112
kaf24@6486 113 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
kaf24@6486 114 put_page(page);
kaf24@6486 115
kaf24@6486 116 shadow_sync_and_drop_references(d, page);
kaf24@6486 117
kaf24@6486 118 put_page(page);
kaf24@6486 119 }
kaf24@6486 120 }
kaf24@6486 121
kaf24@6486 122 return nr_extents;
kaf24@6486 123 }
kaf24@6486 124
kaf24@6486 125 /*
kaf24@6486 126 * To allow safe resume of do_memory_op() after preemption, we need to know
kaf24@6486 127 * at what point in the page list to resume. For this purpose I steal the
kaf24@6486 128 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
kaf24@6486 129 */
kaf24@6486 130 #define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
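/*
 * [Editor's illustration -- not part of the annotated changeset; it only
 *  restates the encoding used by the dispatcher below.]
 * A continuation that resumes after, say, 512 completed extents of an
 * XENMEM_increase_reservation call re-enters do_memory_op() with
 *
 *     cmd = XENMEM_increase_reservation | (512 << START_EXTENT_SHIFT);
 *
 * which do_memory_op() unpacks as
 *
 *     op           = cmd & ((1 << START_EXTENT_SHIFT) - 1);
 *     start_extent = cmd >> START_EXTENT_SHIFT;
 */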
kaf24@6486 131
kaf24@6486 132 long do_memory_op(int cmd, void *arg)
kaf24@6486 133 {
kaf24@6486 134 struct domain *d;
kaf24@6607 135 int rc, start_extent, op, flags = 0, preempted = 0;
kaf24@6486 136 struct xen_memory_reservation reservation;
kaf24@6486 137
kaf24@6486 138 op = cmd & ((1 << START_EXTENT_SHIFT) - 1);
kaf24@6486 139
kaf24@6486 140 switch ( op )
kaf24@6486 141 {
kaf24@6486 142 case XENMEM_increase_reservation:
kaf24@6486 143 case XENMEM_decrease_reservation:
kaf24@6486 144 if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
kaf24@6486 145 return -EFAULT;
kaf24@6486 146
kaf24@6486 147 start_extent = cmd >> START_EXTENT_SHIFT;
kaf24@6486 148 if ( unlikely(start_extent > reservation.nr_extents) )
kaf24@6486 149 return -EINVAL;
kaf24@6486 150
kaf24@6486 151 if ( reservation.extent_start != NULL )
kaf24@6486 152 reservation.extent_start += start_extent;
kaf24@6486 153 reservation.nr_extents -= start_extent;
kaf24@6486 154
kaf24@6486 155 if ( unlikely(reservation.address_bits != 0)
kaf24@6486 156 && (reservation.address_bits < (get_order(max_page) + PAGE_SHIFT)) )
kaf24@6486 157 {
kaf24@6486 158 if ( reservation.address_bits < 31 )
kaf24@6486 159 return -ENOMEM;
kaf24@6486 160 flags = ALLOC_DOM_DMA;
kaf24@6486 161 }
kaf24@6486 162
kaf24@6486 163 if ( likely(reservation.domid == DOMID_SELF) )
kaf24@6486 164 d = current->domain;
kaf24@6486 165 else if ( !IS_PRIV(current->domain) )
kaf24@6486 166 return -EPERM;
kaf24@6486 167 else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
kaf24@6486 168 return -ESRCH;
kaf24@6486 169
kaf24@6486 170 rc = ((op == XENMEM_increase_reservation) ?
kaf24@6486 171 increase_reservation : decrease_reservation)(
kaf24@6486 172 d,
kaf24@6486 173 reservation.extent_start,
kaf24@6486 174 reservation.nr_extents,
kaf24@6486 175 reservation.extent_order,
kaf24@6607 176 flags,
kaf24@6607 177 &preempted);
kaf24@6486 178
kaf24@6486 179 if ( unlikely(reservation.domid != DOMID_SELF) )
kaf24@6486 180 put_domain(d);
kaf24@6486 181
kaf24@6486 182 rc += start_extent;
kaf24@6486 183
kaf24@6607 184 if ( preempted )
kaf24@6486 185 return hypercall2_create_continuation(
kaf24@6607 186 __HYPERVISOR_memory_op, op | (rc << START_EXTENT_SHIFT), arg);
kaf24@6607 187
kaf24@6486 188 break;
kaf24@6486 189
kaf24@6486 190 case XENMEM_maximum_ram_page:
kaf24@6486 191 if ( put_user(max_page, (unsigned long *)arg) )
kaf24@6486 192 return -EFAULT;
kaf24@6486 193 rc = 0;
kaf24@6486 194 break;
kaf24@6486 195
kaf24@6486 196 default:
kaf24@6486 197 rc = -ENOSYS;
kaf24@6486 198 break;
kaf24@6486 199 }
kaf24@6486 200
kaf24@6486 201 return rc;
kaf24@6486 202 }
kaf24@6486 203
kaf24@6486 204 /*
kaf24@6486 205 * Local variables:
kaf24@6486 206 * mode: C
kaf24@6486 207 * c-set-style: "BSD"
kaf24@6486 208 * c-basic-offset: 4
kaf24@6486 209 * tab-width: 4
kaf24@6486 210 * indent-tabs-mode: nil
kaf24@6486 211 * End:
kaf24@6486 212 */