ia64/xen-unstable: view tools/libxc/xc_hvm_build.c @ 11081:323eb29083e6

[HVM] Remove unused apic_enabled field from hvm_info_table.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Thu Aug 10 15:45:47 2006 +0100
parents   713b0878da2f
children  0340e579f065
/******************************************************************************
 * xc_hvm_build.c
 */

#define ELFSIZE 32
#include <stddef.h>
#include <inttypes.h>
#include "xg_private.h"
#include "xc_private.h"
#include "xc_elf.h"
#include <stdlib.h>
#include <unistd.h>
#include <zlib.h>
#include <xen/hvm/hvm_info_table.h>
#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>

#define HVM_LOADER_ENTR_ADDR 0x00100000

#define E820MAX 128

#define E820_RAM          1
#define E820_RESERVED     2
#define E820_ACPI         3
#define E820_NVS          4
#define E820_IO          16
#define E820_SHARED_PAGE 17
#define E820_XENSTORE    18
#define E820_BUFFERED_IO 19

#define E820_MAP_PAGE      0x00090000
#define E820_MAP_NR_OFFSET 0x000001E8
#define E820_MAP_OFFSET    0x000002D0
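
/*
 * Types 1-4 are the standard BIOS e820 types; 16-19 are Xen-private
 * extensions used to tag the special HVM pages.  The map itself is
 * written into the guest page at physical address E820_MAP_PAGE: the
 * entry count goes at byte offset E820_MAP_NR_OFFSET and the entries
 * themselves at E820_MAP_OFFSET, where the HVM firmware expects to
 * find them.
 */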

struct e820entry {
    uint64_t addr;
    uint64_t size;
    uint32_t type;
} __attribute__((packed));

static int
parseelfimage(
    char *elfbase, unsigned long elfsize, struct domain_setup_info *dsi);
static int
loadelfimage(
    char *elfbase, int xch, uint32_t dom, unsigned long *parray,
    struct domain_setup_info *dsi);
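
/*
 * Thin wrapper around the HVMOP_set_param hypercall.  The argument
 * structure must be mlock()ed because Xen reads it directly from our
 * address space while handling the hypercall.
 */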
static void xc_set_hvm_param(int handle,
                             domid_t dom, int param, unsigned long value)
{
    DECLARE_HYPERCALL;
    xen_hvm_param_t arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_param;
    hypercall.arg[1] = (unsigned long)&arg;
    arg.domid = dom;
    arg.index = param;
    arg.value = value;
    if ( mlock(&arg, sizeof(arg)) != 0 )
    {
        PERROR("Could not lock memory for set parameter");
        return;
    }
    rc = do_xen_hypercall(handle, &hypercall);
    safe_munlock(&arg, sizeof(arg));
    if ( rc < 0 )
        PERROR("set HVM parameter failed (%d)", rc);
}
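
/*
 * Example layout (illustrative arithmetic only, derived from the code
 * below): for a 128MB guest (mem_size = 0x8000000) build_e820map()
 * produces, among others:
 *   0x0000000-0x009F800  RAM
 *   0x0100000-0x7FFD000  RAM          (main memory)
 *   0x7FFD000            BUFFERED_IO  (one page)
 *   0x7FFE000            XENSTORE     (one page)
 *   0x7FFF000            SHARED_PAGE  (one page)
 */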
static void build_e820map(void *e820_page, unsigned long long mem_size)
{
    struct e820entry *e820entry =
        (struct e820entry *)(((unsigned char *)e820_page) + E820_MAP_OFFSET);
    unsigned char nr_map = 0;

    /* XXX: Doesn't work for > 4GB yet */
    e820entry[nr_map].addr = 0x0;
    e820entry[nr_map].size = 0x9F800;
    e820entry[nr_map].type = E820_RAM;
    nr_map++;

    e820entry[nr_map].addr = 0x9F800;
    e820entry[nr_map].size = 0x800;
    e820entry[nr_map].type = E820_RESERVED;
    nr_map++;

    e820entry[nr_map].addr = 0xA0000;
    e820entry[nr_map].size = 0x20000;
    e820entry[nr_map].type = E820_IO;
    nr_map++;

    e820entry[nr_map].addr = 0xF0000;
    e820entry[nr_map].size = 0x10000;
    e820entry[nr_map].type = E820_RESERVED;
    nr_map++;

#define STATIC_PAGES 3
    /* 3 static pages:
     * - ioreq buffer.
     * - xenstore.
     * - shared_page.
     */

    /* Most of the ram goes here */
    e820entry[nr_map].addr = 0x100000;
    e820entry[nr_map].size = mem_size - 0x100000 - STATIC_PAGES * PAGE_SIZE;
    e820entry[nr_map].type = E820_RAM;
    nr_map++;

    /* Statically allocated special pages */

    /* For buffered IO requests */
    e820entry[nr_map].addr = mem_size - 3 * PAGE_SIZE;
    e820entry[nr_map].size = PAGE_SIZE;
    e820entry[nr_map].type = E820_BUFFERED_IO;
    nr_map++;

    /* For xenstore */
    e820entry[nr_map].addr = mem_size - 2 * PAGE_SIZE;
    e820entry[nr_map].size = PAGE_SIZE;
    e820entry[nr_map].type = E820_XENSTORE;
    nr_map++;

    /* Shared ioreq_t page */
    e820entry[nr_map].addr = mem_size - PAGE_SIZE;
    e820entry[nr_map].size = PAGE_SIZE;
    e820entry[nr_map].type = E820_SHARED_PAGE;
    nr_map++;

    e820entry[nr_map].addr = mem_size;
    e820entry[nr_map].size = 0x3 * PAGE_SIZE;
    e820entry[nr_map].type = E820_NVS;
    nr_map++;

    e820entry[nr_map].addr = mem_size + 0x3 * PAGE_SIZE;
    e820entry[nr_map].size = 0xA * PAGE_SIZE;
    e820entry[nr_map].type = E820_ACPI;
    nr_map++;

    e820entry[nr_map].addr = 0xFEC00000;
    e820entry[nr_map].size = 0x1400000;
    e820entry[nr_map].type = E820_IO;
    nr_map++;

    *(((unsigned char *)e820_page) + E820_MAP_NR_OFFSET) = nr_map;
}
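
/*
 * The checksum byte is chosen so that all bytes of the table sum to
 * zero modulo 256 -- the same convention ACPI tables use.
 */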
static void set_hvm_info_checksum(struct hvm_info_table *t)
{
    uint8_t *ptr = (uint8_t *)t, sum = 0;
    unsigned int i;

    t->checksum = 0;

    for ( i = 0; i < t->length; i++ )
        sum += *ptr++;

    t->checksum = -sum;
}

/*
 * Use the E820-reserved memory at 0x9F800 to pass HVM info to hvmloader;
 * hvmloader uses this info to set up the BIOS accordingly.
 */
static int set_hvm_info(int xc_handle, uint32_t dom,
                        xen_pfn_t *pfn_list, unsigned int vcpus,
                        unsigned int acpi, unsigned int apic)
{
    char *va_map;
    struct hvm_info_table *va_hvm;

    va_map = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                  PROT_READ | PROT_WRITE,
                                  pfn_list[HVM_INFO_PFN]);

    if ( va_map == NULL )
        return -1;

    va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
    memset(va_hvm, 0, sizeof(*va_hvm));

    strncpy(va_hvm->signature, "HVM INFO", 8);
    va_hvm->length = sizeof(struct hvm_info_table);
    va_hvm->acpi_enabled = acpi;
    va_hvm->nr_vcpus = vcpus;

    set_hvm_info_checksum(va_hvm);

    munmap(va_map, PAGE_SIZE);

    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_APIC_ENABLED, apic);

    return 0;
}
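
/*
 * Build the guest physical address space and load hvmloader: parse and
 * load the ELF image, populate the machine->phys table, publish the
 * hvm_info table and e820 map, and prepare the special (shared ioreq,
 * buffered ioreq, xenstore) pages at the top of guest RAM.
 */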
static int setup_guest(int xc_handle,
                       uint32_t dom, int memsize,
                       char *image, unsigned long image_size,
                       unsigned long nr_pages,
                       vcpu_guest_context_t *ctxt,
                       unsigned long shared_info_frame,
                       unsigned int vcpus,
                       unsigned int pae,
                       unsigned int acpi,
                       unsigned int apic,
                       unsigned int store_evtchn,
                       unsigned long *store_mfn)
{
    xen_pfn_t *page_array = NULL;
    unsigned long count, i;
    unsigned long long ptr;
    xc_mmu_t *mmu = NULL;

    shared_info_t *shared_info;
    void *e820_page;

    struct domain_setup_info dsi;
    uint64_t v_end;

    unsigned long shared_page_frame = 0;
    shared_iopage_t *sp;

    unsigned long ioreq_buffer_frame = 0;
    void *ioreq_buffer_page;

    memset(&dsi, 0, sizeof(struct domain_setup_info));

    if ( (parseelfimage(image, image_size, &dsi)) != 0 )
        goto error_out;

    if ( (dsi.v_kernstart & (PAGE_SIZE - 1)) != 0 )
    {
        PERROR("Guest OS must load to a page boundary.\n");
        goto error_out;
    }

    /* memsize is in megabytes */
    v_end = (unsigned long long)memsize << 20;

    IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
            " Loaded HVM loader: %016"PRIx64"->%016"PRIx64"\n"
            " TOTAL:             %016"PRIx64"->%016"PRIx64"\n",
            dsi.v_kernstart, dsi.v_kernend,
            dsi.v_start, v_end);
    IPRINTF(" ENTRY ADDRESS:     %016"PRIx64"\n", dsi.v_kernentry);

    if ( (v_end - dsi.v_start) > ((unsigned long long)nr_pages << PAGE_SHIFT) )
    {
        PERROR("Initial guest OS requires too much space: "
               "(%lluMB is greater than %lluMB limit)\n",
               (unsigned long long)(v_end - dsi.v_start) >> 20,
               ((unsigned long long)nr_pages << PAGE_SHIFT) >> 20);
        goto error_out;
    }

    if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
    {
        PERROR("Could not allocate memory.\n");
        goto error_out;
    }

    if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
    {
        PERROR("Could not get the page frame list.\n");
        goto error_out;
    }

    loadelfimage(image, xc_handle, dom, page_array, &dsi);

    if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
        goto error_out;

    /* Write the machine->phys table entries. */
    for ( count = 0; count < nr_pages; count++ )
    {
        ptr = (unsigned long long)page_array[count] << PAGE_SHIFT;
        if ( xc_add_mmu_update(xc_handle, mmu,
                               ptr | MMU_MACHPHYS_UPDATE, count) )
            goto error_out;
    }
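
    /*
     * The guest's pseudo-physical space is the contiguous frame range
     * 0..nr_pages-1: each update above maps machine frame
     * page_array[count] back to pseudo-physical frame 'count'.
     */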

    if ( set_hvm_info(xc_handle, dom, page_array, vcpus, acpi, apic) )
    {
        ERROR("Couldn't set hvm info for HVM guest.\n");
        goto error_out;
    }

    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_PAE_ENABLED, pae);

    if ( (e820_page = xc_map_foreign_range(
              xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
              page_array[E820_MAP_PAGE >> PAGE_SHIFT])) == 0 )
        goto error_out;
    memset(e820_page, 0, PAGE_SIZE);
    build_e820map(e820_page, v_end);
    munmap(e820_page, PAGE_SIZE);

    /* shared_info page starts its life empty. */
    if ( (shared_info = xc_map_foreign_range(
              xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
              shared_info_frame)) == 0 )
        goto error_out;
    memset(shared_info, 0, sizeof(shared_info_t));
    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
    munmap(shared_info, PAGE_SIZE);

    /* Paranoia: the shared ioreq page is the last page of guest RAM;
     * make sure it starts out zeroed. */
    shared_page_frame = page_array[(v_end >> PAGE_SHIFT) - 1];
    if ( (sp = (shared_iopage_t *) xc_map_foreign_range(
              xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
              shared_page_frame)) == 0 )
        goto error_out;
    memset(sp, 0, PAGE_SIZE);
    munmap(sp, PAGE_SIZE);

    /* Clean the buffered IO requests page. */
    ioreq_buffer_frame = page_array[(v_end >> PAGE_SHIFT) - 3];
    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                             PROT_READ | PROT_WRITE,
                                             ioreq_buffer_frame);

    if ( ioreq_buffer_page == NULL )
        goto error_out;

    memset(ioreq_buffer_page, 0, PAGE_SIZE);

    munmap(ioreq_buffer_page, PAGE_SIZE);
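
    /*
     * The xenstore page sits two pages below the top of guest RAM
     * (see build_e820map above); advertise its pfn and event channel
     * to the domain and hand its mfn back to the caller.
     */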
    xc_set_hvm_param(xc_handle, dom,
                     HVM_PARAM_STORE_PFN, (v_end >> PAGE_SHIFT) - 2);
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);

    *store_mfn = page_array[(v_end >> PAGE_SHIFT) - 2];
    if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) )
        goto error_out;

    /* Send the page update requests down to the hypervisor. */
    if ( xc_finish_mmu_updates(xc_handle, mmu) )
        goto error_out;

    free(mmu);
    free(page_array);

    /*
     * Initial register values:
     */
    ctxt->user_regs.eip = dsi.v_kernentry;

    return 0;

 error_out:
    free(mmu);
    free(page_array);
    return -1;
}

static int xc_hvm_build_internal(int xc_handle,
                                 uint32_t domid,
                                 int memsize,
                                 char *image,
                                 unsigned long image_size,
                                 unsigned int vcpus,
                                 unsigned int pae,
                                 unsigned int acpi,
                                 unsigned int apic,
                                 unsigned int store_evtchn,
                                 unsigned long *store_mfn)
{
    dom0_op_t launch_op, op;
    int rc, i;
    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
    long nr_pages; /* signed, so the error return of xc_get_tot_pages()
                      below is actually detectable */
    xen_capabilities_info_t xen_caps;

    if ( (image == NULL) || (image_size == 0) )
    {
        ERROR("Image required");
        goto error_out;
    }

    if ( (rc = xc_version(xc_handle, XENVER_capabilities, &xen_caps)) != 0 )
    {
        PERROR("Failed to get xen version info");
        goto error_out;
    }

    if ( !strstr(xen_caps, "hvm") )
    {
        PERROR("CPU doesn't support HVM extensions or "
               "the extensions are not enabled");
        goto error_out;
    }

    if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
    {
        PERROR("Could not find total pages for domain");
        goto error_out;
    }
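
    /*
     * The vcpu context lives on our stack but is handed to Xen by
     * pointer (via set_xen_guest_handle below), so it must stay
     * resident for the DOM0_SETVCPUCONTEXT call.
     */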
    if ( mlock(&st_ctxt, sizeof(st_ctxt)) )
    {
        PERROR("%s: ctxt mlock failed", __func__);
        return 1;
    }

    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    if ( (xc_dom0_op(xc_handle, &op) < 0) ||
         ((uint16_t)op.u.getdomaininfo.domain != domid) )
    {
        PERROR("Could not get info on domain");
        goto error_out;
    }

    memset(ctxt, 0, sizeof(*ctxt));

    ctxt->flags = VGCF_HVM_GUEST;
    if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
                     ctxt, op.u.getdomaininfo.shared_info_frame,
                     vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0 )
    {
        ERROR("Error constructing guest OS");
        goto error_out;
    }

    /* FPU is set up to default initial state. */
    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

    /* Virtual IDT is empty at start-of-day. */
    for ( i = 0; i < 256; i++ )
    {
        ctxt->trap_ctxt[i].vector = i;
        ctxt->trap_ctxt[i].cs     = FLAT_KERNEL_CS;
    }

    /* No LDT. */
    ctxt->ldt_ents = 0;

    /* Use the default Xen-provided GDT. */
    ctxt->gdt_ents = 0;

    /* No debugging. */
    memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));

    /* No callback handlers. */
#if defined(__i386__)
    ctxt->event_callback_cs     = FLAT_KERNEL_CS;
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_cs  = FLAT_KERNEL_CS;
    ctxt->failsafe_callback_eip = 0;
#elif defined(__x86_64__)
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_eip = 0;
    ctxt->syscall_callback_eip  = 0;
#endif

    memset(&launch_op, 0, sizeof(launch_op));

    launch_op.u.setvcpucontext.domain = (domid_t)domid;
    launch_op.u.setvcpucontext.vcpu   = 0;
    set_xen_guest_handle(launch_op.u.setvcpucontext.ctxt, ctxt);

    launch_op.cmd = DOM0_SETVCPUCONTEXT;
    rc = xc_dom0_op(xc_handle, &launch_op);

    return rc;

 error_out:
    return -1;
}

static inline int is_loadable_phdr(Elf32_Phdr *phdr)
{
    return ((phdr->p_type == PT_LOAD) &&
            ((phdr->p_flags & (PF_W|PF_X)) != 0));
}
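
/*
 * Scan the program headers to find the physical extent of the loadable
 * segments and sanity-check the image.  Note that the entry point
 * handed to the domain builder is the fixed HVM_LOADER_ENTR_ADDR, not
 * the image's e_entry (which is only validated here).
 */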
static int parseelfimage(char *elfbase,
                         unsigned long elfsize,
                         struct domain_setup_info *dsi)
{
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfbase;
    Elf32_Phdr *phdr;
    Elf32_Shdr *shdr;
    unsigned long kernstart = ~0UL, kernend = 0UL;
    char *shstrtab;
    int h;

    if ( !IS_ELF(*ehdr) )
    {
        ERROR("Kernel image does not have an ELF header.");
        return -EINVAL;
    }

    if ( (ehdr->e_phoff + (ehdr->e_phnum * ehdr->e_phentsize)) > elfsize )
    {
        ERROR("ELF program headers extend beyond end of image.");
        return -EINVAL;
    }

    if ( (ehdr->e_shoff + (ehdr->e_shnum * ehdr->e_shentsize)) > elfsize )
    {
        ERROR("ELF section headers extend beyond end of image.");
        return -EINVAL;
    }

    /* Find the section-header strings table. */
    if ( ehdr->e_shstrndx == SHN_UNDEF )
    {
        ERROR("ELF image has no section-header strings table (shstrtab).");
        return -EINVAL;
    }
    shdr = (Elf32_Shdr *)(elfbase + ehdr->e_shoff +
                          (ehdr->e_shstrndx * ehdr->e_shentsize));
    shstrtab = elfbase + shdr->sh_offset;

    for ( h = 0; h < ehdr->e_phnum; h++ )
    {
        phdr = (Elf32_Phdr *)(elfbase + ehdr->e_phoff + (h * ehdr->e_phentsize));
        if ( !is_loadable_phdr(phdr) )
            continue;
        if ( phdr->p_paddr < kernstart )
            kernstart = phdr->p_paddr;
        if ( (phdr->p_paddr + phdr->p_memsz) > kernend )
            kernend = phdr->p_paddr + phdr->p_memsz;
    }

    if ( (kernstart > kernend) ||
         (ehdr->e_entry < kernstart) ||
         (ehdr->e_entry > kernend) )
    {
        ERROR("Malformed ELF image.");
        return -EINVAL;
    }

    dsi->v_start = 0x00000000;

    dsi->v_kernstart = kernstart;
    dsi->v_kernend   = kernend;
    dsi->v_kernentry = HVM_LOADER_ENTR_ADDR;

    dsi->v_end = dsi->v_kernend;

    return 0;
}

static int
loadelfimage(
    char *elfbase, int xch, uint32_t dom, unsigned long *parray,
    struct domain_setup_info *dsi)
{
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfbase;
    Elf32_Phdr *phdr;
    int h;

    char *va;
    unsigned long pa, done, chunksz;

    for ( h = 0; h < ehdr->e_phnum; h++ )
    {
        phdr = (Elf32_Phdr *)(elfbase + ehdr->e_phoff + (h * ehdr->e_phentsize));
        if ( !is_loadable_phdr(phdr) )
            continue;
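
        /* Copy the file-backed portion of the segment into the guest,
         * one mapped page at a time. */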
        for ( done = 0; done < phdr->p_filesz; done += chunksz )
        {
            pa = (phdr->p_paddr + done) - dsi->v_start;
            if ( (va = xc_map_foreign_range(
                      xch, dom, PAGE_SIZE, PROT_WRITE,
                      parray[pa >> PAGE_SHIFT])) == 0 )
                return -1;
            chunksz = phdr->p_filesz - done;
            if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
                chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
            memcpy(va + (pa & (PAGE_SIZE-1)),
                   elfbase + phdr->p_offset + done, chunksz);
            munmap(va, PAGE_SIZE);
        }
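
        /* Zero the remainder of the segment (p_memsz beyond p_filesz,
         * i.e. the BSS). */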
        for ( ; done < phdr->p_memsz; done += chunksz )
        {
            pa = (phdr->p_paddr + done) - dsi->v_start;
            if ( (va = xc_map_foreign_range(
                      xch, dom, PAGE_SIZE, PROT_WRITE,
                      parray[pa >> PAGE_SHIFT])) == 0 )
                return -1;
            chunksz = phdr->p_memsz - done;
            if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
                chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
            memset(va + (pa & (PAGE_SIZE-1)), 0, chunksz);
            munmap(va, PAGE_SIZE);
        }
    }

    return 0;
}

/* xc_hvm_build
 *
 * Create a domain for a virtualized Linux, using files/filenames.
 */

int xc_hvm_build(int xc_handle,
                 uint32_t domid,
                 int memsize,
                 const char *image_name,
                 unsigned int vcpus,
                 unsigned int pae,
                 unsigned int acpi,
                 unsigned int apic,
                 unsigned int store_evtchn,
                 unsigned long *store_mfn)
{
    char *image;
    int sts;
    unsigned long image_size;

    if ( (image_name == NULL) ||
         ((image = xc_read_image(image_name, &image_size)) == NULL) )
        return -1;

    sts = xc_hvm_build_internal(xc_handle, domid, memsize,
                                image, image_size,
                                vcpus, pae, acpi, apic,
                                store_evtchn, store_mfn);

    free(image);

    return sts;
}

/* xc_hvm_build_mem
 *
 * Create a domain for a virtualized Linux, using in-memory buffers.
 */

int xc_hvm_build_mem(int xc_handle,
                     uint32_t domid,
                     int memsize,
                     const char *image_buffer,
                     unsigned long image_size,
                     unsigned int vcpus,
                     unsigned int pae,
                     unsigned int acpi,
                     unsigned int apic,
                     unsigned int store_evtchn,
                     unsigned long *store_mfn)
{
    int sts;
    unsigned long img_len;
    char *img;

    /* Validate that there is a kernel buffer */

    if ( (image_buffer == NULL) || (image_size == 0) )
    {
        ERROR("kernel image buffer not present");
        return -1;
    }

    img = xc_inflate_buffer(image_buffer, image_size, &img_len);
    if ( img == NULL )
    {
        ERROR("unable to inflate kernel image buffer");
        return -1;
    }

    sts = xc_hvm_build_internal(xc_handle, domid, memsize,
                                img, img_len,
                                vcpus, pae, acpi, apic,
                                store_evtchn, store_mfn);

    /* xc_inflate_buffer may return the original buffer pointer (for
       already-inflated buffers), so exercise some care in freeing. */

    if ( (img != NULL) && (img != image_buffer) )
        free(img);

    return sts;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */