ia64/xen-unstable

view tools/libxc/xc_linux_build.c @ 5331:c2f094c21ddf

bitkeeper revision 1.1679 (42a40cfaZ0Dy-HjTM0W3L10VllkAnw)

Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/cl349/xen-unstable.bk-clean
author cl349@firebug.cl.cam.ac.uk
date Mon Jun 06 08:44:42 2005 +0000 (2005-06-06)
parents c59632e7ff3e 411d895b167e
children fa735bb2f79a
line source
1 /******************************************************************************
2 * xc_linux_build.c
3 */
5 #include "xc_private.h"
7 #if defined(__i386__)
8 #define ELFSIZE 32
9 #endif
11 #if defined(__x86_64__)
12 #define ELFSIZE 64
13 #endif
16 #include "xc_elf.h"
17 #include <stdlib.h>
18 #include <zlib.h>
20 #if defined(__i386__)
21 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
22 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
23 #endif
25 #if defined(__x86_64__)
26 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
27 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
28 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
29 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
30 #endif
33 #define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
34 #define round_pgdown(_p) ((_p)&PAGE_MASK)
/*
 * Identify the format of a kernel image held in memory and fill in the
 * corresponding loader callbacks.
 *
 * Each probe_* routine returns 0 on a match (populating *load_funcs);
 * try them in order and accept the first that recognises the image.
 *
 * Returns 0 on success, -EINVAL if no known format matches.
 */
static int probeimageformat(char *image,
                            unsigned long image_size,
                            struct load_funcs *load_funcs)
{
    /* ELF first, then raw binary: first recognised format wins. */
    if ( probe_elf(image, image_size, load_funcs) == 0 )
        return 0;

    if ( probe_bin(image, image_size, load_funcs) == 0 )
        return 0;

    ERROR( "Unrecognized image format" );
    return -EINVAL;
}
/*
 * Build the initial bootstrap environment for a new paravirtualised
 * guest: load the kernel (and optional initrd) into the domain's
 * memory, construct the bootstrap page tables, write the
 * phys-to-machine map, and initialise the start_info and shared_info
 * pages.
 *
 * Parameters:
 *   xc_handle         - open handle on the Xen control interface.
 *   dom               - id of the target (already created) domain.
 *   image/image_size  - in-memory kernel image (ELF or raw binary).
 *   initrd_gfd        - gzip stream for the initrd; only read when
 *                       initrd_len != 0.
 *   initrd_len        - size in bytes of the (uncompressed) initrd.
 *   nr_pages          - number of machine pages owned by the domain.
 *   pvsi              - OUT: guest virtual address of start_info.
 *   pvke              - OUT: guest virtual kernel entry point.
 *   ctxt              - vcpu context; only pt_base is written here.
 *   cmdline           - kernel command line (truncated to MAX_CMDLINE).
 *   shared_info_frame - machine frame number of the shared_info page.
 *   control_evtchn    - event channel to the domain controller.
 *   flags             - start_info flags passed through to the guest.
 *   vcpus             - vcpu count reported in shared_info.
 *
 * Returns 0 on success, -1 on failure (mmu and page_array are freed on
 * the error path; page mappings taken inside the loops are unmapped
 * before jumping to error_out).
 */
static int setup_guest(int xc_handle,
                       u32 dom,
                       char *image, unsigned long image_size,
                       gzFile initrd_gfd, unsigned long initrd_len,
                       unsigned long nr_pages,
                       unsigned long *pvsi, unsigned long *pvke,
                       vcpu_guest_context_t *ctxt,
                       const char *cmdline,
                       unsigned long shared_info_frame,
                       unsigned int control_evtchn,
                       unsigned long flags,
                       unsigned int vcpus)
{
    /* Mapped views of guest page-table pages, and cursors within them. */
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
#if defined(__x86_64__)
    l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
    l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
#endif
    unsigned long *page_array = NULL;   /* pfn -> machine frame list */
    unsigned long l2tab = 0;            /* machine addrs of PT pages */
    unsigned long l1tab = 0;
#if defined(__x86_64__)
    unsigned long l3tab = 0;
    unsigned long l4tab = 0;
#endif
    unsigned long count, i;
    start_info_t *start_info;
    shared_info_t *shared_info;
    mmu_t *mmu = NULL;
    int rc;

    unsigned long nr_pt_pages;          /* pages reserved for page tables */
    unsigned long ppt_alloc;            /* next pfn to use as a PT page */
    unsigned long *physmap, *physmap_e, physmap_pfn;

    struct load_funcs load_funcs;
    struct domain_setup_info dsi;
    /* Guest-virtual layout of the bootstrap address space. */
    unsigned long vinitrd_start;
    unsigned long vinitrd_end;
    unsigned long vphysmap_start;
    unsigned long vphysmap_end;
    unsigned long vstartinfo_start;
    unsigned long vstartinfo_end;
    unsigned long vstack_start;
    unsigned long vstack_end;
    unsigned long vpt_start;
    unsigned long vpt_end;
    unsigned long v_end;

    /* Identify the image format and parse its load/entry addresses. */
    rc = probeimageformat(image, image_size, &load_funcs);
    if ( rc != 0 )
        goto error_out;

    memset(&dsi, 0, sizeof(struct domain_setup_info));

    rc = (load_funcs.parseimage)(image, image_size, &dsi);
    if ( rc != 0 )
        goto error_out;

    if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
    {
        PERROR("Guest OS must load to a page boundary.\n");
        goto error_out;
    }

    /*
     * Why do we need this? The number of page-table frames depends on the
     * size of the bootstrap address space. But the size of the address space
     * depends on the number of page-table frames (since each one is mapped
     * read-only). We have a pair of simultaneous equations in two unknowns,
     * which we solve by exhaustive search.
     */
    vinitrd_start    = round_pgup(dsi.v_end);
    vinitrd_end      = vinitrd_start + initrd_len;
    vphysmap_start   = round_pgup(vinitrd_end);
    vphysmap_end     = vphysmap_start + (nr_pages * sizeof(unsigned long));
    vpt_start        = round_pgup(vphysmap_end);
    for ( nr_pt_pages = 2; ; nr_pt_pages++ )
    {
        vpt_end          = vpt_start + (nr_pt_pages * PAGE_SIZE);
        vstartinfo_start = vpt_end;
        vstartinfo_end   = vstartinfo_start + PAGE_SIZE;
        vstack_start     = vstartinfo_end;
        vstack_end       = vstack_start + PAGE_SIZE;
        /* Round the total up to a 4MB boundary... */
        v_end            = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
        if ( (v_end - vstack_end) < (512UL << 10) )
            v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
#if defined(__i386__)
        /* 1 L2 page plus one L1 page per 4MB of address space. */
        if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >>
               L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
            break;
#endif
#if defined(__x86_64__)
/* Number of pages at level _s needed to map the range [_l, _h). */
#define NR(_l,_h,_s) \
    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
    ((_l) & ~((1UL<<(_s))-1))) >> (_s))
        if ( (1 + /* # L4 */
            NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
            NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
            NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
            <= nr_pt_pages )
            break;
#endif
    }

#define _p(a) ((void *) (a))

    printf("VIRTUAL MEMORY ARRANGEMENT:\n"
           " Loaded kernel: %p->%p\n"
           " Init. ramdisk: %p->%p\n"
           " Phys-Mach map: %p->%p\n"
           " Page tables:   %p->%p\n"
           " Start info:    %p->%p\n"
           " Boot stack:    %p->%p\n"
           " TOTAL:         %p->%p\n",
           _p(dsi.v_kernstart), _p(dsi.v_kernend),
           _p(vinitrd_start), _p(vinitrd_end),
           _p(vphysmap_start), _p(vphysmap_end),
           _p(vpt_start), _p(vpt_end),
           _p(vstartinfo_start), _p(vstartinfo_end),
           _p(vstack_start), _p(vstack_end),
           _p(dsi.v_start), _p(v_end));
    printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));

    if ( (v_end - dsi.v_start) > (nr_pages * PAGE_SIZE) )
    {
        printf("Initial guest OS requires too much space\n"
               "(%luMB is greater than %luMB limit)\n",
               (v_end-dsi.v_start)>>20, (nr_pages<<PAGE_SHIFT)>>20);
        goto error_out;
    }

    if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
    {
        PERROR("Could not allocate memory");
        goto error_out;
    }

    /* page_array[pfn] holds the machine frame backing guest pfn. */
    if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
    }

    (load_funcs.loadimage)(image, image_size, xc_handle, dom, page_array,
                           &dsi);

    /* Load the initial ramdisk image. */
    if ( initrd_len != 0 )
    {
        for ( i = (vinitrd_start - dsi.v_start);
              i < (vinitrd_end - dsi.v_start); i += PAGE_SIZE )
        {
            char page[PAGE_SIZE];
            /*
             * NOTE(review): only -1 is treated as failure; gzread can
             * also return a short count on a truncated stream, which
             * would copy stale stack bytes into the guest — confirm.
             */
            if ( gzread(initrd_gfd, page, PAGE_SIZE) == -1 )
            {
                PERROR("Error reading initrd image, could not");
                goto error_out;
            }
            xc_copy_to_domain_page(xc_handle, dom,
                                   page_array[i>>PAGE_SHIFT], page);
        }
    }

    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
        goto error_out;

#if defined(__i386__)
    /* First allocate page for page dir. PT pages live at vpt_start. */
    ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
    l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
    ctxt->pt_base = l2tab;

    /* Initialise the page tables. */
    if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                        PROT_READ|PROT_WRITE,
                                        l2tab >> PAGE_SHIFT)) == NULL )
        goto error_out;
    memset(vl2tab, 0, PAGE_SIZE);
    vl2e = &vl2tab[l2_table_offset(dsi.v_start)];
    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
    {
        /* Crossing into a new L1 page: allocate and map it. */
        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
        {
            l1tab = page_array[ppt_alloc++] << PAGE_SHIFT;
            if ( vl1tab != NULL )
                munmap(vl1tab, PAGE_SIZE);
            if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                                PROT_READ|PROT_WRITE,
                                                l1tab >> PAGE_SHIFT)) == NULL )
            {
                munmap(vl2tab, PAGE_SIZE);
                goto error_out;
            }
            memset(vl1tab, 0, PAGE_SIZE);
            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
            *vl2e++ = l1tab | L2_PROT;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        /* The page-table pages themselves must be mapped read-only. */
        if ( (count >= ((vpt_start-dsi.v_start)>>PAGE_SHIFT)) &&
             (count <  ((vpt_end  -dsi.v_start)>>PAGE_SHIFT)) )
            *vl1e &= ~_PAGE_RW;
        vl1e++;
    }
    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
#endif
#if defined(__x86_64__)

/*
 * Allocate the next PT frame and (re)map it, replacing any previous
 * mapping held in vltab.
 * NOTE(review): not wrapped in do { } while (0), so it is unsafe in
 * unbraced if/else; also, on map failure vltab is NULL and
 * munmap(vltab, ...) is a no-op-by-luck — looks unintended, confirm.
 */
#define alloc_pt(ltab, vltab) \
        ltab = page_array[ppt_alloc++] << PAGE_SHIFT; \
        if (vltab != NULL) { \
            munmap(vltab, PAGE_SIZE); \
        } \
        if ((vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, \
                          PROT_READ|PROT_WRITE, \
                          ltab >> PAGE_SHIFT)) == NULL) { \
            munmap(vltab, PAGE_SIZE); \
            goto error_out; \
        } \
        memset(vltab, 0, PAGE_SIZE);

    /* First allocate page for page dir. */
    ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
    l4tab = page_array[ppt_alloc++] << PAGE_SHIFT;
    ctxt->pt_base = l4tab;

    /* Initialize page table. */
    if ( (vl4tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                        PROT_READ|PROT_WRITE,
                                        l4tab >> PAGE_SHIFT)) == NULL )
        goto error_out;
    memset(vl4tab, 0, PAGE_SIZE);
    vl4e = &vl4tab[l4_table_offset(dsi.v_start)];

    /* Walk every pfn, allocating L1/L2/L3 pages as boundaries are crossed. */
    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++)
    {
        if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
        {
            alloc_pt(l1tab, vl1tab);

            if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
            {
                alloc_pt(l2tab, vl2tab);
                if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
                {
                    alloc_pt(l3tab, vl3tab);
                    vl3e = &vl3tab[l3_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
                    *vl4e = l3tab | L4_PROT;
                    vl4e++;
                }
                vl2e = &vl2tab[l2_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
                *vl3e = l2tab | L3_PROT;
                vl3e++;
            }
            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
            *vl2e = l1tab | L2_PROT;
            vl2e++;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        /* Page-table pages are mapped read-only into the guest. */
        if ( (count >= ((vpt_start-dsi.v_start)>>PAGE_SHIFT)) &&
             (count <  ((vpt_end  -dsi.v_start)>>PAGE_SHIFT)) )
        {
            *vl1e &= ~_PAGE_RW;
        }
        vl1e++;
    }

    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
    munmap(vl3tab, PAGE_SIZE);
    munmap(vl4tab, PAGE_SIZE);
#endif

    /* Write the phys->machine and machine->phys table entries. */
    physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
    physmap = physmap_e = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
        page_array[physmap_pfn++]);
    for ( count = 0; count < nr_pages; count++ )
    {
        /* machine->phys entry goes via the hypervisor MMU queue... */
        if ( add_mmu_update(xc_handle, mmu,
                            (page_array[count] << PAGE_SHIFT) |
                            MMU_MACHPHYS_UPDATE, count) )
        {
            munmap(physmap, PAGE_SIZE);
            goto error_out;
        }
        /* ...while phys->machine is written directly into the guest map. */
        *physmap_e++ = page_array[count];
        /* Filled this physmap page: move on to the next one. */
        if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
        {
            munmap(physmap, PAGE_SIZE);
            physmap = physmap_e = xc_map_foreign_range(
                xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
                page_array[physmap_pfn++]);
        }
    }
    munmap(physmap, PAGE_SIZE);

#if defined(__i386__)
    /*
     * Pin down l2tab addr as page dir page - causes hypervisor to provide
     * correct protection for the page
     */
    if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE, l2tab>>PAGE_SHIFT, dom) )
        goto error_out;
#endif

#if defined(__x86_64__)
    /*
     * Pin down l4tab addr as page dir page - causes hypervisor to provide
     * correct protection for the page
     */
    if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE, l4tab>>PAGE_SHIFT, dom) )
        goto error_out;
#endif

    /* Fill in the start_info page the guest reads at boot. */
    start_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
        page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
    memset(start_info, 0, sizeof(*start_info));
    start_info->nr_pages     = nr_pages;
    start_info->shared_info  = shared_info_frame << PAGE_SHIFT;
    start_info->flags        = flags;
    start_info->pt_base      = vpt_start;
    start_info->nr_pt_frames = nr_pt_pages;
    start_info->mfn_list     = vphysmap_start;
    start_info->domain_controller_evtchn = control_evtchn;
    if ( initrd_len != 0 )
    {
        start_info->mod_start    = vinitrd_start;
        start_info->mod_len      = initrd_len;
    }
    /* Bounded copy; explicitly NUL-terminated below. */
    strncpy((char *)start_info->cmd_line, cmdline, MAX_CMDLINE);
    start_info->cmd_line[MAX_CMDLINE-1] = '\0';
    munmap(start_info, PAGE_SIZE);

    /* shared_info page starts its life empty. */
    shared_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
    memset(shared_info, 0, sizeof(shared_info_t));
    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;

    shared_info->n_vcpu = vcpus;
    printf(" VCPUS:         %d\n", shared_info->n_vcpu);

    munmap(shared_info, PAGE_SIZE);

    /* Send the page update requests down to the hypervisor. */
    if ( finish_mmu_updates(xc_handle, mmu) )
        goto error_out;

    free(mmu);
    free(page_array);

    *pvsi = vstartinfo_start;
    *pvke = dsi.v_kernentry;

    return 0;

 error_out:
    if ( mmu != NULL )
        free(mmu);
    if ( page_array != NULL )
        free(page_array);
    return -1;
}
422 int xc_linux_build(int xc_handle,
423 u32 domid,
424 const char *image_name,
425 const char *ramdisk_name,
426 const char *cmdline,
427 unsigned int control_evtchn,
428 unsigned long flags,
429 unsigned int vcpus)
430 {
431 dom0_op_t launch_op, op;
432 int initrd_fd = -1;
433 gzFile initrd_gfd = NULL;
434 int rc, i;
435 vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
436 unsigned long nr_pages;
437 char *image = NULL;
438 unsigned long image_size, initrd_size=0;
439 unsigned long vstartinfo_start, vkern_entry;
441 if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
442 {
443 PERROR("Could not find total pages for domain");
444 goto error_out;
445 }
447 if ( (image = xc_read_kernel_image(image_name, &image_size)) == NULL )
448 goto error_out;
450 if ( (ramdisk_name != NULL) && (strlen(ramdisk_name) != 0) )
451 {
452 if ( (initrd_fd = open(ramdisk_name, O_RDONLY)) < 0 )
453 {
454 PERROR("Could not open the initial ramdisk image");
455 goto error_out;
456 }
458 initrd_size = xc_get_filesz(initrd_fd);
460 if ( (initrd_gfd = gzdopen(initrd_fd, "rb")) == NULL )
461 {
462 PERROR("Could not allocate decompression state for initrd");
463 goto error_out;
464 }
465 }
467 if ( mlock(&st_ctxt, sizeof(st_ctxt) ) )
468 {
469 PERROR("Unable to mlock ctxt");
470 return 1;
471 }
473 op.cmd = DOM0_GETDOMAININFO;
474 op.u.getdomaininfo.domain = (domid_t)domid;
475 if ( (do_dom0_op(xc_handle, &op) < 0) ||
476 ((u16)op.u.getdomaininfo.domain != domid) )
477 {
478 PERROR("Could not get info on domain");
479 goto error_out;
480 }
482 if ( xc_domain_get_vcpu_context(xc_handle, domid, 0, ctxt) )
483 {
484 PERROR("Could not get vcpu context");
485 goto error_out;
486 }
488 if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
489 (ctxt->pt_base != 0) )
490 {
491 ERROR("Domain is already constructed");
492 goto error_out;
493 }
495 if ( setup_guest(xc_handle, domid, image, image_size,
496 initrd_gfd, initrd_size, nr_pages,
497 &vstartinfo_start, &vkern_entry,
498 ctxt, cmdline,
499 op.u.getdomaininfo.shared_info_frame,
500 control_evtchn, flags, vcpus) < 0 )
501 {
502 ERROR("Error constructing guest OS");
503 goto error_out;
504 }
506 if ( initrd_fd >= 0 )
507 close(initrd_fd);
508 if ( initrd_gfd )
509 gzclose(initrd_gfd);
510 if ( image != NULL )
511 free(image);
513 ctxt->flags = 0;
515 /*
516 * Initial register values:
517 * DS,ES,FS,GS = FLAT_KERNEL_DS
518 * CS:EIP = FLAT_KERNEL_CS:start_pc
519 * SS:ESP = FLAT_KERNEL_DS:start_stack
520 * ESI = start_info
521 * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
522 * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
523 */
524 ctxt->user_regs.ds = FLAT_KERNEL_DS;
525 ctxt->user_regs.es = FLAT_KERNEL_DS;
526 ctxt->user_regs.fs = FLAT_KERNEL_DS;
527 ctxt->user_regs.gs = FLAT_KERNEL_DS;
528 ctxt->user_regs.ss = FLAT_KERNEL_SS;
529 ctxt->user_regs.cs = FLAT_KERNEL_CS;
530 ctxt->user_regs.eip = vkern_entry;
531 ctxt->user_regs.esp = vstartinfo_start + 2*PAGE_SIZE;
532 ctxt->user_regs.esi = vstartinfo_start;
533 ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */
535 /* FPU is set up to default initial state. */
536 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
538 /* Virtual IDT is empty at start-of-day. */
539 for ( i = 0; i < 256; i++ )
540 {
541 ctxt->trap_ctxt[i].vector = i;
542 ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
543 }
545 /* No LDT. */
546 ctxt->ldt_ents = 0;
548 /* Use the default Xen-provided GDT. */
549 ctxt->gdt_ents = 0;
551 /* Ring 1 stack is the initial stack. */
552 ctxt->kernel_ss = FLAT_KERNEL_SS;
553 ctxt->kernel_sp = vstartinfo_start + 2*PAGE_SIZE;
555 /* No debugging. */
556 memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
558 /* No callback handlers. */
559 #if defined(__i386__)
560 ctxt->event_callback_cs = FLAT_KERNEL_CS;
561 ctxt->event_callback_eip = 0;
562 ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
563 ctxt->failsafe_callback_eip = 0;
564 #elif defined(__x86_64__)
565 ctxt->event_callback_eip = 0;
566 ctxt->failsafe_callback_eip = 0;
567 ctxt->syscall_callback_eip = 0;
568 #endif
570 memset( &launch_op, 0, sizeof(launch_op) );
572 launch_op.u.setdomaininfo.domain = (domid_t)domid;
573 launch_op.u.setdomaininfo.vcpu = 0;
574 launch_op.u.setdomaininfo.ctxt = ctxt;
576 launch_op.cmd = DOM0_SETDOMAININFO;
577 rc = do_dom0_op(xc_handle, &launch_op);
579 return rc;
581 error_out:
582 if ( initrd_gfd != NULL )
583 gzclose(initrd_gfd);
584 else if ( initrd_fd >= 0 )
585 close(initrd_fd);
586 if ( image != NULL )
587 free(image);
589 return -1;
590 }