ia64/xen-unstable

xen/common/xencomm.c @ 15896:42d4313b5fdd

[IA64] update .hgignore for xenitp

Signed-off-by: Alex Williamson <alex.williamson@hp.com>

author:   Alex Williamson <alex.williamson@hp.com>
date:     Mon Sep 24 14:21:02 2007 -0600
parents:  adad9f3820f1
children: 146f214a0e63

/******************************************************************************
 * xencomm.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Tristan Gingold <tristan.gingold@bull.net>
 *          Isaku Yamahata <yamahata@valinux.co.jp> multiple page support
 */

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/xencomm.h>
#include <public/xen.h>
#include <public/xencomm.h>
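
/* Change the #undef below to #define to trace chunk copies via xc_dprintk(). */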
#undef DEBUG
#ifdef DEBUG
#define xc_dprintk(f, a...) printk("[xencomm]" f , ## a)
#else
#define xc_dprintk(f, a...) ((void)0)
#endif
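
/*
 * Convert a guest physical address into a Xen virtual address, given the
 * (already referenced) page it lives on: keep the offset within the page
 * and graft it onto the page's address in Xen's direct mapping.
 */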
static void *
xencomm_vaddr(unsigned long paddr, struct page_info *page)
{
    return (void *)((paddr & ~PAGE_MASK) | (unsigned long)page_to_virt(page));
}

/* get_page() to prevent another vcpu freeing the page. */
static int
xencomm_get_page(unsigned long paddr, struct page_info **page)
{
    unsigned long maddr = paddr_to_maddr(paddr);
    if ( maddr == 0 )
        return -EFAULT;

    *page = maddr_to_page(maddr);
    if ( get_page(*page, current->domain) == 0 )
    {
        if ( page_get_owner(*page) != current->domain )
        {
            /*
             * This page might have been granted by another domain, or
             * is being freed by a concurrent decrease-reservation
             * hypercall.
             */
            gdprintk(XENLOG_WARNING,
                     "bad page passed. paddr 0x%lx maddr 0x%lx\n",
                     paddr, maddr);
            return -EFAULT;
        }

        /* Try again. */
        cpu_relax();
        return -EAGAIN;
    }

    return 0;
}

/* Check whether a struct xencomm_desc at paddr would cross a page boundary. */
static int
xencomm_desc_cross_page_boundary(unsigned long paddr)
{
    unsigned long offset = paddr & ~PAGE_MASK;
    if ( offset > PAGE_SIZE - sizeof(struct xencomm_desc) )
        return 1;
    return 0;
}
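
/*
 * Cursor state for walking a xencomm descriptor: 'desc_in_paddr' is the
 * guest physical address of the descriptor, 'page' holds a reference on
 * the page containing the current address slot, and 'address' is that
 * slot's Xen virtual address.
 */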
struct xencomm_ctxt {
    struct xencomm_desc __user *desc_in_paddr;
    uint32_t nr_addrs;

    struct page_info *page;
    unsigned long *address;
};

static uint32_t
xencomm_ctxt_nr_addrs(const struct xencomm_ctxt *ctxt)
{
    return ctxt->nr_addrs;
}

static unsigned long *
xencomm_ctxt_address(struct xencomm_ctxt *ctxt)
{
    return ctxt->address;
}

static int
xencomm_ctxt_init(const void *handle, struct xencomm_ctxt *ctxt)
{
    struct page_info *page;
    struct xencomm_desc *desc;
    int ret;

    /* Avoid unaligned access. */
    if ( ((unsigned long)handle % __alignof__(*desc)) != 0 )
        return -EINVAL;
    if ( xencomm_desc_cross_page_boundary((unsigned long)handle) )
        return -EINVAL;

    /* First we need to access the descriptor. */
    ret = xencomm_get_page((unsigned long)handle, &page);
    if ( ret )
        return ret;

    desc = xencomm_vaddr((unsigned long)handle, page);
    if ( desc->magic != XENCOMM_MAGIC )
    {
        printk("%s: error: %p magic was 0x%x\n", __func__, desc, desc->magic);
        put_page(page);
        return -EINVAL;
    }

    /* Copy before use: the guest may modify the descriptor concurrently. */
    ctxt->nr_addrs = desc->nr_addrs;
    ctxt->desc_in_paddr = (struct xencomm_desc *)handle;
    ctxt->page = page;
    ctxt->address = &desc->address[0];
    return 0;
}

/*
 * Calculate the vaddr of &ctxt->desc_in_paddr->address[i], get_page() it,
 * and store the results in ctxt->page and ctxt->address, dropping the
 * reference on the previous page if there was one.
 *
 * A guest domain passes the array ctxt->desc_in_paddr->address[].
 * It is gpaddr-contiguous but not maddr-contiguous, so we can't obtain
 * the vaddr by simple offsetting.
 * We need to convert the gpaddr, &ctxt->desc_in_paddr->address[i],
 * into a maddr and then convert that to a Xen virtual address in order
 * to access the array.
 * Because we access the array sequentially, the conversion can be
 * optimized away by reusing the last value of ctxt->address: the
 * gpaddr -> maddr -> vaddr conversion is necessary only when crossing
 * a page boundary.
 */
static int
xencomm_ctxt_next(struct xencomm_ctxt *ctxt, int i)
{
    unsigned long paddr;
    struct page_info *page;
    int ret;

    BUG_ON(i >= ctxt->nr_addrs);

    /* For the i == 0 case we already calculated it in xencomm_ctxt_init(). */
    if ( i != 0 )
        ctxt->address++;

    if ( ((unsigned long)ctxt->address & ~PAGE_MASK) != 0 )
        return 0;

    /* Crossing a page boundary: the machine address must be recalculated. */
    paddr = (unsigned long)&ctxt->desc_in_paddr->address[i];
    ret = xencomm_get_page(paddr, &page);
    if ( ret )
        return ret;

    put_page(ctxt->page);
    ctxt->page = page;
    ctxt->address = xencomm_vaddr(paddr, page);

    return 0;
}

static void
xencomm_ctxt_done(struct xencomm_ctxt *ctxt)
{
    put_page(ctxt->page);
}
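
/*
 * Copy one chunk from guest physical memory into hypervisor space.
 * Callers guarantee that the chunk does not cross a page boundary;
 * transient get_page() failures (-EAGAIN) are retried here.
 */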
static int
xencomm_copy_chunk_from(
    unsigned long to, unsigned long paddr, unsigned int len)
{
    struct page_info *page;
    int res;

    do {
        res = xencomm_get_page(paddr, &page);
    } while ( res == -EAGAIN );

    if ( res )
        return res;

    xc_dprintk("%lx[%d] -> %lx\n",
               (unsigned long)xencomm_vaddr(paddr, page), len, to);

    memcpy((void *)to, xencomm_vaddr(paddr, page), len);
    put_page(page);

    return 0;
}
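
/*
 * An inline handle encodes a contiguous guest-physical buffer directly
 * in the handle value, so it can be copied page by page without
 * walking a descriptor.
 */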
static unsigned long
xencomm_inline_from_guest(
    void *to, const void *from, unsigned int n, unsigned int skip)
{
    unsigned long src_paddr = xencomm_inline_addr(from) + skip;

    while ( n > 0 )
    {
        unsigned int chunksz, bytes;

        chunksz = PAGE_SIZE - (src_paddr % PAGE_SIZE);
        bytes = min(chunksz, n);

        if ( xencomm_copy_chunk_from((unsigned long)to, src_paddr, bytes) )
            return n;
        src_paddr += bytes;
        to += bytes;
        n -= bytes;
    }

    /* Always successful. */
    return 0;
}

/**
 * xencomm_copy_from_guest: Copy a block of data from domain space.
 * @to:   Machine address.
 * @from: Physical address of a xencomm buffer descriptor.
 * @n:    Number of bytes to copy.
 * @skip: Number of bytes from the start to skip.
 *
 * Copy data from domain to hypervisor.
 *
 * Returns the number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
xencomm_copy_from_guest(
    void *to, const void *from, unsigned int n, unsigned int skip)
{
    struct xencomm_ctxt ctxt;
    unsigned int from_pos = 0;
    unsigned int to_pos = 0;
    unsigned int i = 0;

    if ( xencomm_is_inline(from) )
        return xencomm_inline_from_guest(to, from, n, skip);

    if ( xencomm_ctxt_init(from, &ctxt) )
        return n;

    /* Iterate through the descriptor, copying up to a page at a time. */
    while ( (to_pos < n) && (i < xencomm_ctxt_nr_addrs(&ctxt)) )
    {
        unsigned long src_paddr;
        unsigned int pgoffset, chunksz, chunk_skip;

        if ( xencomm_ctxt_next(&ctxt, i) )
            goto out;
        src_paddr = *xencomm_ctxt_address(&ctxt);
        if ( src_paddr == XENCOMM_INVALID )
        {
            i++;
            continue;
        }

        pgoffset = src_paddr % PAGE_SIZE;
        chunksz = PAGE_SIZE - pgoffset;

        chunk_skip = min(chunksz, skip);
        from_pos += chunk_skip;
        chunksz -= chunk_skip;
        skip -= chunk_skip;

        if ( skip == 0 && chunksz > 0 )
        {
            unsigned int bytes = min(chunksz, n - to_pos);

            if ( xencomm_copy_chunk_from((unsigned long)to + to_pos,
                                         src_paddr + chunk_skip, bytes) )
                goto out;
            from_pos += bytes;
            to_pos += bytes;
        }

        i++;
    }

out:
    xencomm_ctxt_done(&ctxt);
    return n - to_pos;
}
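
/*
 * Usage sketch (illustrative only; 'arg' and 'struct foo' are
 * hypothetical): a hypercall handler fetching a guest argument
 * structure through a xencomm handle might do
 *
 *     struct foo f;
 *     if ( xencomm_copy_from_guest(&f, arg, sizeof(f), 0) != 0 )
 *         return -EFAULT;
 */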
static int
xencomm_copy_chunk_to(
    unsigned long paddr, unsigned long from, unsigned int len)
{
    struct page_info *page;
    int res;

    do {
        res = xencomm_get_page(paddr, &page);
    } while ( res == -EAGAIN );

    if ( res )
        return res;

    xc_dprintk("%lx[%d] -> %lx\n", from, len,
               (unsigned long)xencomm_vaddr(paddr, page));

    memcpy(xencomm_vaddr(paddr, page), (void *)from, len);
    put_page(page);

    return 0;
}

static unsigned long
xencomm_inline_to_guest(
    void *to, const void *from, unsigned int n, unsigned int skip)
{
    unsigned long dest_paddr = xencomm_inline_addr(to) + skip;

    while ( n > 0 )
    {
        unsigned int chunksz, bytes;

        chunksz = PAGE_SIZE - (dest_paddr % PAGE_SIZE);
        bytes = min(chunksz, n);

        if ( xencomm_copy_chunk_to(dest_paddr, (unsigned long)from, bytes) )
            return n;
        dest_paddr += bytes;
        from += bytes;
        n -= bytes;
    }

    /* Always successful. */
    return 0;
}

/**
 * xencomm_copy_to_guest: Copy a block of data to domain space.
 * @to:   Physical address of a xencomm buffer descriptor.
 * @from: Machine address.
 * @n:    Number of bytes to copy.
 * @skip: Number of bytes from the start to skip.
 *
 * Copy data from hypervisor to domain.
 *
 * Returns the number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
xencomm_copy_to_guest(
    void *to, const void *from, unsigned int n, unsigned int skip)
{
    struct xencomm_ctxt ctxt;
    unsigned int from_pos = 0;
    unsigned int to_pos = 0;
    unsigned int i = 0;

    if ( xencomm_is_inline(to) )
        return xencomm_inline_to_guest(to, from, n, skip);

    if ( xencomm_ctxt_init(to, &ctxt) )
        return n;

    /* Iterate through the descriptor, copying up to a page at a time. */
    while ( (from_pos < n) && (i < xencomm_ctxt_nr_addrs(&ctxt)) )
    {
        unsigned long dest_paddr;
        unsigned int pgoffset, chunksz, chunk_skip;

        if ( xencomm_ctxt_next(&ctxt, i) )
            goto out;
        dest_paddr = *xencomm_ctxt_address(&ctxt);
        if ( dest_paddr == XENCOMM_INVALID )
        {
            i++;
            continue;
        }

        pgoffset = dest_paddr % PAGE_SIZE;
        chunksz = PAGE_SIZE - pgoffset;

        chunk_skip = min(chunksz, skip);
        to_pos += chunk_skip;
        chunksz -= chunk_skip;
        skip -= chunk_skip;

        if ( skip == 0 && chunksz > 0 )
        {
            unsigned int bytes = min(chunksz, n - from_pos);

            if ( xencomm_copy_chunk_to(dest_paddr + chunk_skip,
                                       (unsigned long)from + from_pos, bytes) )
                goto out;
            from_pos += bytes;
            to_pos += bytes;
        }

        i++;
    }

out:
    xencomm_ctxt_done(&ctxt);
    return n - from_pos;
}
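
/*
 * Usage sketch (illustrative only; 'arg' and 'f' continue the
 * hypothetical example above): writing a result back to the guest
 * through the same handle might look like
 *
 *     if ( xencomm_copy_to_guest(arg, &f, sizeof(f), 0) != 0 )
 *         return -EFAULT;
 */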
static int xencomm_inline_add_offset(void **handle, unsigned int bytes)
{
    *handle += bytes;
    return 0;
}

/*
 * Offset the page addresses in 'handle' to skip 'bytes' bytes.  Set
 * completely exhausted pages to XENCOMM_INVALID.
 */
int xencomm_add_offset(void **handle, unsigned int bytes)
{
    struct xencomm_ctxt ctxt;
    int i = 0;
    int res = 0;

    if ( xencomm_is_inline(*handle) )
        return xencomm_inline_add_offset(handle, bytes);

    res = xencomm_ctxt_init(*handle, &ctxt);
    if ( res != 0 )
        return res;

    /* Iterate through the descriptor, incrementing addresses. */
    while ( (bytes > 0) && (i < xencomm_ctxt_nr_addrs(&ctxt)) )
    {
        unsigned long *address;
        unsigned long dest_paddr;
        unsigned int pgoffset, chunksz, chunk_skip;

        res = xencomm_ctxt_next(&ctxt, i);
        if ( res )
            goto out;
        address = xencomm_ctxt_address(&ctxt);
        dest_paddr = *address;
        if ( dest_paddr == XENCOMM_INVALID )
        {
            i++;
            continue;
        }

        pgoffset = dest_paddr % PAGE_SIZE;
        chunksz = PAGE_SIZE - pgoffset;

        chunk_skip = min(chunksz, bytes);
        if ( chunk_skip == chunksz )
            *address = XENCOMM_INVALID; /* exhausted this page */
        else
            *address += chunk_skip;
        bytes -= chunk_skip;

        i++;
    }

out:
    xencomm_ctxt_done(&ctxt);
    return res;
}
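
/*
 * Usage sketch (illustrative only; 'arg' and 'done' are hypothetical):
 * after a partial copy of 'done' bytes, advance the handle so that the
 * next copy resumes where the previous one stopped:
 *
 *     if ( xencomm_add_offset(&arg, done) != 0 )
 *         return -EFAULT;
 */

/*
 * A handle is considered NULL if it is an inline handle encoding
 * address 0, or a descriptor none of whose entries holds a valid
 * address.
 */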
int xencomm_handle_is_null(void *handle)
{
    struct xencomm_ctxt ctxt;
    int i;
    int res = 1;

    if ( xencomm_is_inline(handle) )
        return xencomm_inline_addr(handle) == 0;

    if ( xencomm_ctxt_init(handle, &ctxt) )
        return 1;

    for ( i = 0; i < xencomm_ctxt_nr_addrs(&ctxt); i++ )
    {
        if ( xencomm_ctxt_next(&ctxt, i) )
            goto out;
        if ( *xencomm_ctxt_address(&ctxt) != XENCOMM_INVALID )
        {
            res = 0;
            goto out;
        }
    }

out:
    xencomm_ctxt_done(&ctxt);
    return res;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */