ia64/xen-unstable

view xen/common/xencomm.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink in versions higher than 123, so
xen's build system could not detect whether udev was in place and
had the required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents 3673926b2375
children
line source
1 /******************************************************************************
2 * xencomm.c
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 * Copyright (C) IBM Corp. 2006
19 *
20 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
21 * Tristan Gingold <tristan.gingold@bull.net>
22 * Isaku Yamahata <yamahata@valinux.co.jp> multiple page support
23 */
25 #include <xen/config.h>
26 #include <xen/mm.h>
27 #include <xen/sched.h>
28 #include <xen/xencomm.h>
29 #include <public/xen.h>
30 #include <public/xencomm.h>
/*
 * Debug tracing: define DEBUG here to get per-chunk copy traces via
 * printk(); otherwise xc_dprintk() compiles away to a no-op.
 */
#undef DEBUG
#ifdef DEBUG
#define xc_dprintk(f, a...) printk("[xencomm]" f , ## a)
#else
#define xc_dprintk(f, a...) ((void)0)
#endif
39 static void *
40 xencomm_vaddr(unsigned long paddr, struct page_info *page)
41 {
42 return (void*)((paddr & ~PAGE_MASK) | (unsigned long)page_to_virt(page));
43 }
45 /* get_page() to prevent another vcpu freeing the page. */
46 static int
47 xencomm_get_page(unsigned long paddr, struct page_info **page)
48 {
49 unsigned long maddr = paddr_to_maddr(paddr);
50 if ( maddr == 0 )
51 return -EFAULT;
53 *page = maddr_to_page(maddr);
54 if ( !get_page(*page, current->domain) )
55 {
56 /*
57 * This page might be a page granted by another domain, or this page
58 * is freed with decrease reservation hypercall at the same time.
59 */
60 gdprintk(XENLOG_WARNING,
61 "bad page is passed. paddr 0x%lx maddr 0x%lx\n",
62 paddr, maddr);
63 return -EFAULT;
64 }
66 return 0;
67 }
69 /* check if struct desc doesn't cross page boundry */
70 static int
71 xencomm_desc_cross_page_boundary(unsigned long paddr)
72 {
73 unsigned long offset = paddr & ~PAGE_MASK;
74 if ( offset > PAGE_SIZE - sizeof(struct xencomm_desc) )
75 return 1;
76 return 0;
77 }
79 struct xencomm_ctxt {
80 struct xencomm_desc __user *desc_in_paddr;
81 uint32_t nr_addrs;
83 struct page_info *page;
84 unsigned long *address;
85 };
87 static uint32_t
88 xencomm_ctxt_nr_addrs(const struct xencomm_ctxt *ctxt)
89 {
90 return ctxt->nr_addrs;
91 }
93 static unsigned long*
94 xencomm_ctxt_address(struct xencomm_ctxt *ctxt)
95 {
96 return ctxt->address;
97 }
99 static int
100 xencomm_ctxt_init(const void *handle, struct xencomm_ctxt *ctxt)
101 {
102 struct page_info *page;
103 struct xencomm_desc *desc;
104 int ret;
106 /* Avoid unaligned access. */
107 if ( ((unsigned long)handle % __alignof__(*desc)) != 0 )
108 return -EINVAL;
109 if ( xencomm_desc_cross_page_boundary((unsigned long)handle) )
110 return -EINVAL;
112 /* First we need to access the descriptor. */
113 ret = xencomm_get_page((unsigned long)handle, &page);
114 if ( ret )
115 return ret;
117 desc = xencomm_vaddr((unsigned long)handle, page);
118 if ( desc->magic != XENCOMM_MAGIC )
119 {
120 printk("%s: error: %p magic was 0x%x\n", __func__, desc, desc->magic);
121 put_page(page);
122 return -EINVAL;
123 }
125 /* Copy before use: It is possible for a guest to modify concurrently. */
126 ctxt->nr_addrs = desc->nr_addrs;
127 ctxt->desc_in_paddr = (struct xencomm_desc*)handle;
128 ctxt->page = page;
129 ctxt->address = &desc->address[0];
130 return 0;
131 }
133 /*
134 * Calculate the vaddr of &ctxt->desc_in_paddr->address[i] and get_page().
135 * And put the results in ctxt->page and ctxt->address.
136 * If there is the previous page, put_page().
137 *
138 * A guest domain passes the array, ctxt->desc_in_paddr->address[].
139 * It is gpaddr-contiguous, but not maddr-contiguous so that
140 * we can't obtain the vaddr by simple offsetting.
141 * We need to convert gpaddr, &ctxt->desc_in_paddr->address[i],
142 * into maddr and then convert it to the xen virtual address in order
143 * to access there.
144 * The conversion can be optimized out by using the last result of
145 * ctxt->address because we access the array sequentially.
146 * The conversion, gpaddr -> maddr -> vaddr, is necessary only when
147 * crossing page boundary.
148 */
149 static int
150 xencomm_ctxt_next(struct xencomm_ctxt *ctxt, int i)
151 {
152 unsigned long paddr;
153 struct page_info *page;
154 int ret;
156 BUG_ON(i >= ctxt->nr_addrs);
158 /* For i == 0 case we already calculated it in xencomm_ctxt_init(). */
159 if ( i != 0 )
160 ctxt->address++;
162 if ( ((unsigned long)ctxt->address & ~PAGE_MASK) != 0 )
163 return 0;
165 /* Crossing page boundary: machine address must be calculated. */
166 paddr = (unsigned long)&ctxt->desc_in_paddr->address[i];
167 ret = xencomm_get_page(paddr, &page);
168 if ( ret )
169 return ret;
171 put_page(ctxt->page);
172 ctxt->page = page;
173 ctxt->address = xencomm_vaddr(paddr, page);
175 return 0;
176 }
178 static void
179 xencomm_ctxt_done(struct xencomm_ctxt *ctxt)
180 {
181 put_page(ctxt->page);
182 }
184 static int
185 xencomm_copy_chunk_from(
186 unsigned long to, unsigned long paddr, unsigned int len)
187 {
188 struct page_info *page;
189 int res;
191 do {
192 res = xencomm_get_page(paddr, &page);
193 } while ( res == -EAGAIN );
195 if ( res )
196 return res;
198 xc_dprintk("%lx[%d] -> %lx\n",
199 (unsigned long)xencomm_vaddr(paddr, page), len, to);
201 memcpy((void *)to, xencomm_vaddr(paddr, page), len);
202 put_page(page);
204 return 0;
205 }
207 static unsigned long
208 xencomm_inline_from_guest(
209 void *to, const void *from, unsigned int n, unsigned int skip)
210 {
211 unsigned long src_paddr = xencomm_inline_addr(from) + skip;
213 while ( n > 0 )
214 {
215 unsigned int chunksz, bytes;
217 chunksz = PAGE_SIZE - (src_paddr % PAGE_SIZE);
218 bytes = min(chunksz, n);
220 if ( xencomm_copy_chunk_from((unsigned long)to, src_paddr, bytes) )
221 return n;
222 src_paddr += bytes;
223 to += bytes;
224 n -= bytes;
225 }
227 /* Always successful. */
228 return 0;
229 }
231 /**
232 * xencomm_copy_from_guest: Copy a block of data from domain space.
233 * @to: Machine address.
234 * @from: Physical address to a xencomm buffer descriptor.
235 * @n: Number of bytes to copy.
236 * @skip: Number of bytes from the start to skip.
237 *
238 * Copy data from domain to hypervisor.
239 *
240 * Returns number of bytes that could not be copied.
241 * On success, this will be zero.
242 */
243 unsigned long
244 xencomm_copy_from_guest(
245 void *to, const void *from, unsigned int n, unsigned int skip)
246 {
247 struct xencomm_ctxt ctxt;
248 unsigned int from_pos = 0;
249 unsigned int to_pos = 0;
250 unsigned int i = 0;
252 if ( xencomm_is_inline(from) )
253 return xencomm_inline_from_guest(to, from, n, skip);
255 if ( xencomm_ctxt_init(from, &ctxt) )
256 return n;
258 /* Iterate through the descriptor, copying up to a page at a time */
259 while ( (to_pos < n) && (i < xencomm_ctxt_nr_addrs(&ctxt)) )
260 {
261 unsigned long src_paddr;
262 unsigned int pgoffset, chunksz, chunk_skip;
264 if ( xencomm_ctxt_next(&ctxt, i) )
265 goto out;
266 src_paddr = *xencomm_ctxt_address(&ctxt);
267 if ( src_paddr == XENCOMM_INVALID )
268 {
269 i++;
270 continue;
271 }
273 pgoffset = src_paddr % PAGE_SIZE;
274 chunksz = PAGE_SIZE - pgoffset;
276 chunk_skip = min(chunksz, skip);
277 from_pos += chunk_skip;
278 chunksz -= chunk_skip;
279 skip -= chunk_skip;
281 if ( skip == 0 && chunksz > 0 )
282 {
283 unsigned int bytes = min(chunksz, n - to_pos);
285 if ( xencomm_copy_chunk_from((unsigned long)to + to_pos,
286 src_paddr + chunk_skip, bytes) )
287 goto out;
288 from_pos += bytes;
289 to_pos += bytes;
290 }
292 i++;
293 }
295 out:
296 xencomm_ctxt_done(&ctxt);
297 return n - to_pos;
298 }
300 static int
301 xencomm_copy_chunk_to(
302 unsigned long paddr, unsigned long from, unsigned int len)
303 {
304 struct page_info *page;
305 int res;
307 do {
308 res = xencomm_get_page(paddr, &page);
309 } while ( res == -EAGAIN );
311 if ( res )
312 return res;
314 xc_dprintk("%lx[%d] -> %lx\n", from, len,
315 (unsigned long)xencomm_vaddr(paddr, page));
317 memcpy(xencomm_vaddr(paddr, page), (void *)from, len);
318 xencomm_mark_dirty((unsigned long)xencomm_vaddr(paddr, page), len);
319 put_page(page);
321 return 0;
322 }
324 static unsigned long
325 xencomm_inline_to_guest(
326 void *to, const void *from, unsigned int n, unsigned int skip)
327 {
328 unsigned long dest_paddr = xencomm_inline_addr(to) + skip;
330 while ( n > 0 )
331 {
332 unsigned int chunksz, bytes;
334 chunksz = PAGE_SIZE - (dest_paddr % PAGE_SIZE);
335 bytes = min(chunksz, n);
337 if ( xencomm_copy_chunk_to(dest_paddr, (unsigned long)from, bytes) )
338 return n;
339 dest_paddr += bytes;
340 from += bytes;
341 n -= bytes;
342 }
344 /* Always successful. */
345 return 0;
346 }
348 /**
349 * xencomm_copy_to_guest: Copy a block of data to domain space.
350 * @to: Physical address to xencomm buffer descriptor.
351 * @from: Machine address.
352 * @n: Number of bytes to copy.
353 * @skip: Number of bytes from the start to skip.
354 *
355 * Copy data from hypervisor to domain.
356 *
357 * Returns number of bytes that could not be copied.
358 * On success, this will be zero.
359 */
360 unsigned long
361 xencomm_copy_to_guest(
362 void *to, const void *from, unsigned int n, unsigned int skip)
363 {
364 struct xencomm_ctxt ctxt;
365 unsigned int from_pos = 0;
366 unsigned int to_pos = 0;
367 unsigned int i = 0;
369 if ( xencomm_is_inline(to) )
370 return xencomm_inline_to_guest(to, from, n, skip);
372 if ( xencomm_ctxt_init(to, &ctxt) )
373 return n;
375 /* Iterate through the descriptor, copying up to a page at a time */
376 while ( (from_pos < n) && (i < xencomm_ctxt_nr_addrs(&ctxt)) )
377 {
378 unsigned long dest_paddr;
379 unsigned int pgoffset, chunksz, chunk_skip;
381 if ( xencomm_ctxt_next(&ctxt, i) )
382 goto out;
383 dest_paddr = *xencomm_ctxt_address(&ctxt);
384 if ( dest_paddr == XENCOMM_INVALID )
385 {
386 i++;
387 continue;
388 }
390 pgoffset = dest_paddr % PAGE_SIZE;
391 chunksz = PAGE_SIZE - pgoffset;
393 chunk_skip = min(chunksz, skip);
394 to_pos += chunk_skip;
395 chunksz -= chunk_skip;
396 skip -= chunk_skip;
398 if ( skip == 0 && chunksz > 0 )
399 {
400 unsigned int bytes = min(chunksz, n - from_pos);
402 if ( xencomm_copy_chunk_to(dest_paddr + chunk_skip,
403 (unsigned long)from + from_pos, bytes) )
404 goto out;
405 from_pos += bytes;
406 to_pos += bytes;
407 }
409 i++;
410 }
412 out:
413 xencomm_ctxt_done(&ctxt);
414 return n - from_pos;
415 }
/* For an inline handle the "descriptor" is the address itself, so
 * skipping bytes is a plain pointer bump.  Always succeeds. */
static int xencomm_inline_add_offset(void **handle, unsigned int bytes)
{
    *handle = (void *)((unsigned long)*handle + bytes);
    return 0;
}
423 /* Offset page addresses in 'handle' to skip 'bytes' bytes. Set completely
424 * exhausted pages to XENCOMM_INVALID. */
425 int xencomm_add_offset(void **handle, unsigned int bytes)
426 {
427 struct xencomm_ctxt ctxt;
428 int i = 0;
429 int res = 0;
431 if ( xencomm_is_inline(*handle) )
432 return xencomm_inline_add_offset(handle, bytes);
434 res = xencomm_ctxt_init(handle, &ctxt);
435 if ( res != 0 )
436 return res;
438 /* Iterate through the descriptor incrementing addresses */
439 while ( (bytes > 0) && (i < xencomm_ctxt_nr_addrs(&ctxt)) )
440 {
441 unsigned long *address;
442 unsigned long dest_paddr;
443 unsigned int pgoffset, chunksz, chunk_skip;
445 res = xencomm_ctxt_next(&ctxt, i);
446 if ( res )
447 goto out;
448 address = xencomm_ctxt_address(&ctxt);
449 dest_paddr = *address;
450 if ( dest_paddr == XENCOMM_INVALID )
451 {
452 i++;
453 continue;
454 }
456 pgoffset = dest_paddr % PAGE_SIZE;
457 chunksz = PAGE_SIZE - pgoffset;
459 chunk_skip = min(chunksz, bytes);
460 if ( chunk_skip == chunksz )
461 *address = XENCOMM_INVALID; /* exhausted this page */
462 else
463 *address += chunk_skip;
464 bytes -= chunk_skip;
466 i++;
467 }
469 out:
470 xencomm_ctxt_done(&ctxt);
471 return res;
472 }
474 int xencomm_handle_is_null(void *handle)
475 {
476 struct xencomm_ctxt ctxt;
477 int i;
478 int res = 1;
480 if ( xencomm_is_inline(handle) )
481 return xencomm_inline_addr(handle) == 0;
483 if ( xencomm_ctxt_init(handle, &ctxt) )
484 return 1;
486 for ( i = 0; i < xencomm_ctxt_nr_addrs(&ctxt); i++ )
487 {
488 if ( xencomm_ctxt_next(&ctxt, i) )
489 goto out;
490 if ( *xencomm_ctxt_address(&ctxt) != XENCOMM_INVALID )
491 {
492 res = 0;
493 goto out;
494 }
495 }
497 out:
498 xencomm_ctxt_done(&ctxt);
499 return res;
500 }
502 /*
503 * Local variables:
504 * mode: C
505 * c-set-style: "BSD"
506 * c-basic-offset: 4
507 * tab-width: 4
508 * indent-tabs-mode: nil
509 * End:
510 */