ia64/xen-unstable

view xen/arch/x86/domain_build.c @ 4258:beb68750d6e0

bitkeeper revision 1.1236.1.110 (4240375cOt3uh22RXHHKXqGhr8yvEg)

Restore "PHYSICAL MEMORY ARRANGEMENT" printk.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Tue Mar 22 15:18:52 2005 +0000 (2005-03-22)
parents a13b9052d91d
children a01199a95070 8396f6da60b4
/******************************************************************************
 * domain_build.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/elf.h>
#include <xen/kernel.h>
#include <asm/regs.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/shadow.h>

/* opt_dom0_mem: Kilobytes of memory allocated to domain 0. */
static unsigned int opt_dom0_mem = 0;
integer_param("dom0_mem", opt_dom0_mem);

#if defined(__i386__)
/* No ring-3 access in initial leaf page tables. */
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#elif defined(__x86_64__)
/* Allow ring-3 access in long mode as guest cannot use ring 1. */
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
#endif
/* Don't change these: Linux expects just these bits to be set. */
/* (And that includes the bogus _PAGE_DIRTY!) */
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)

#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
#define round_pgdown(_p) ((_p)&PAGE_MASK)

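/*
 * Allocate the largest extent of contiguous domheap pages for domain d that
 * does not exceed 'max' pages: start at the largest power-of-two order not
 * exceeding 'max' and back off one order at a time until an allocation
 * succeeds (or order 0 also fails, in which case NULL is returned).
 */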
static struct pfn_info *alloc_largest(struct domain *d, unsigned long max)
{
    struct pfn_info *page;
    unsigned int order = get_order(max * PAGE_SIZE);
    if ( (max & (max-1)) != 0 )
        order--;
    while ( (page = alloc_domheap_pages(d, order)) == NULL )
        if ( order-- == 0 )
            break;
    return page;
}

int construct_dom0(struct domain *d,
                   unsigned long _image_start, unsigned long image_len,
                   unsigned long _initrd_start, unsigned long initrd_len,
                   char *cmdline)
{
    char *dst;
    int i, rc;
    unsigned long pfn, mfn;
    unsigned long nr_pages;
    unsigned long nr_pt_pages;
    unsigned long alloc_start;
    unsigned long alloc_end;
    unsigned long count;
    struct pfn_info *page = NULL;
    start_info_t *si;
    struct exec_domain *ed = d->exec_domain[0];
#if defined(__i386__)
    char *image_start = (char *)_image_start; /* use lowmem mappings */
    char *initrd_start = (char *)_initrd_start; /* use lowmem mappings */
#elif defined(__x86_64__)
    char *image_start = __va(_image_start);
    char *initrd_start = __va(_initrd_start);
    l4_pgentry_t *l4tab = NULL, *l4start = NULL;
    l3_pgentry_t *l3tab = NULL, *l3start = NULL;
#endif
    l2_pgentry_t *l2tab = NULL, *l2start = NULL;
    l1_pgentry_t *l1tab = NULL, *l1start = NULL;

    /*
     * This fully describes the memory layout of the initial domain. All
     * *_start addresses are page-aligned, except v_start (and v_end) which
     * are superpage-aligned.
     */
    struct domain_setup_info dsi;
    unsigned long vinitrd_start;
    unsigned long vinitrd_end;
    unsigned long vphysmap_start;
    unsigned long vphysmap_end;
    unsigned long vstartinfo_start;
    unsigned long vstartinfo_end;
    unsigned long vstack_start;
    unsigned long vstack_end;
    unsigned long vpt_start;
    unsigned long vpt_end;
    unsigned long v_end;

    /* Machine address of next candidate page-table page. */
    unsigned long mpt_alloc;

    extern void physdev_init_dom0(struct domain *);

    /* Sanity! */
    if ( d->id != 0 )
        BUG();
    if ( test_bit(DF_CONSTRUCTED, &d->d_flags) )
        BUG();

    memset(&dsi, 0, sizeof(struct domain_setup_info));

    printk("*** LOADING DOMAIN 0 ***\n");

    /* By default DOM0 is allocated all available memory. */
    d->max_pages = ~0U;
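    /*
     * If no dom0_mem= limit was given, take everything currently free plus
     * the pages holding the kernel image and initrd: those temporary buffers
     * are handed back to the domheap below once their contents have been
     * copied into the new domain.
     */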
    if ( (nr_pages = opt_dom0_mem >> (PAGE_SHIFT - 10)) == 0 )
        nr_pages = avail_domheap_pages() +
            ((initrd_len + PAGE_SIZE - 1) >> PAGE_SHIFT) +
            ((image_len + PAGE_SIZE - 1) >> PAGE_SHIFT);
    if ( (page = alloc_largest(d, nr_pages)) == NULL )
        panic("Not enough RAM for DOM0 reservation.\n");
    alloc_start = page_to_phys(page);
    alloc_end = alloc_start + (d->tot_pages << PAGE_SHIFT);

    rc = parseelfimage(image_start, image_len, &dsi);
    if ( rc != 0 )
        return rc;

    /* Set up domain options */
    if ( dsi.use_writable_pagetables )
        vm_assist(d, VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

    /* Align load address to 4MB boundary. */
    dsi.v_start &= ~((1UL<<22)-1);

    /*
     * Why do we need this? The number of page-table frames depends on the
     * size of the bootstrap address space. But the size of the address space
     * depends on the number of page-table frames (since each one is mapped
     * read-only). We have a pair of simultaneous equations in two unknowns,
     * which we solve by exhaustive search.
     */
    vinitrd_start = round_pgup(dsi.v_kernend);
    vinitrd_end = vinitrd_start + initrd_len;
    vphysmap_start = round_pgup(vinitrd_end);
    vphysmap_end = vphysmap_start + (nr_pages * sizeof(u32));
    vpt_start = round_pgup(vphysmap_end);
    for ( nr_pt_pages = 2; ; nr_pt_pages++ )
    {
        vpt_end = vpt_start + (nr_pt_pages * PAGE_SIZE);
        vstartinfo_start = vpt_end;
        vstartinfo_end = vstartinfo_start + PAGE_SIZE;
        vstack_start = vstartinfo_end;
        vstack_end = vstack_start + PAGE_SIZE;
        v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
        if ( (v_end - vstack_end) < (512UL << 10) )
            v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
#if defined(__i386__)
        if ( (((v_end - dsi.v_start + ((1UL<<L2_PAGETABLE_SHIFT)-1)) >>
               L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
            break;
#elif defined(__x86_64__)
#define NR(_l,_h,_s) \
    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
      ((_l) & ~((1UL<<(_s))-1))) >> (_s))
        if ( (1 + /* # L4 */
              NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
              NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
              NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
             <= nr_pt_pages )
            break;
#endif
    }

    if ( (v_end - dsi.v_start) > (alloc_end - alloc_start) )
        panic("Insufficient contiguous RAM to build kernel image.\n");

    printk("PHYSICAL MEMORY ARRANGEMENT:\n"
           " Dom0 alloc.: %p->%p",
           alloc_start, alloc_end);
    if ( d->tot_pages < nr_pages )
        printk(" (%d pages to be allocated)",
               nr_pages - d->tot_pages);
    printk("\nVIRTUAL MEMORY ARRANGEMENT:\n"
           " Loaded kernel: %p->%p\n"
           " Init. ramdisk: %p->%p\n"
           " Phys-Mach map: %p->%p\n"
           " Page tables: %p->%p\n"
           " Start info: %p->%p\n"
           " Boot stack: %p->%p\n"
           " TOTAL: %p->%p\n",
           dsi.v_kernstart, dsi.v_kernend,
           vinitrd_start, vinitrd_end,
           vphysmap_start, vphysmap_end,
           vpt_start, vpt_end,
           vstartinfo_start, vstartinfo_end,
           vstack_start, vstack_end,
           dsi.v_start, v_end);
    printk(" ENTRY ADDRESS: %p\n", dsi.v_kernentry);

    if ( (v_end - dsi.v_start) > (nr_pages * PAGE_SIZE) )
    {
        printk("Initial guest OS requires too much space\n"
               "(%luMB is greater than %luMB limit)\n",
               (v_end-dsi.v_start)>>20, (nr_pages<<PAGE_SHIFT)>>20);
        return -ENOMEM;
    }

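    /*
     * Page-table frames are taken from the start of the region reserved for
     * them in the virtual layout, i.e. from the machine address within the
     * Dom0 allocation that corresponds to vpt_start.
     */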
    mpt_alloc = (vpt_start - dsi.v_start) + alloc_start;

    SET_GDT_ENTRIES(ed, DEFAULT_GDT_ENTRIES);
    SET_GDT_ADDRESS(ed, DEFAULT_GDT_ADDRESS);

    /*
     * We're basically forcing default RPLs to 1, so that our "what privilege
     * level are we returning to?" logic works.
     */
    ed->arch.failsafe_selector = FLAT_KERNEL_CS;
    ed->arch.event_selector = FLAT_KERNEL_CS;
    ed->arch.kernel_ss = FLAT_KERNEL_SS;
    for ( i = 0; i < 256; i++ )
        ed->arch.traps[i].cs = FLAT_KERNEL_CS;

#if defined(__i386__)

    /*
     * Protect the lowest 1GB of memory. We use a temporary mapping there
     * from which we copy the kernel and ramdisk images.
     */
    if ( dsi.v_start < (1UL<<30) )
    {
        printk("Initial loading isn't allowed in the lowest 1GB of memory.\n");
        return -EINVAL;
    }

    /* WARNING: The new domain must have its 'processor' field filled in! */
    l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
    memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
    l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR);
    l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
    ed->arch.guest_table = mk_pagetable((unsigned long)l2start);

    l2tab += l2_table_offset(dsi.v_start);
    mfn = alloc_start >> PAGE_SHIFT;
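    /*
     * Map the whole bootstrap region (dsi.v_start .. v_end) with 4kB
     * mappings, carving a fresh L1 table out of mpt_alloc each time a new
     * L2 slot is first needed.
     */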
    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
    {
        if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
        {
            l1start = l1tab = (l1_pgentry_t *)mpt_alloc;
            mpt_alloc += PAGE_SIZE;
            *l2tab++ = mk_l2_pgentry((unsigned long)l1start | L2_PROT);
            clear_page(l1tab);
            if ( count == 0 )
                l1tab += l1_table_offset(dsi.v_start);
        }
        *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);

        page = &frame_table[mfn];
        if ( !get_page_and_type(page, d, PGT_writable_page) )
            BUG();

        mfn++;
    }

    /* Pages that are part of page tables must be read only. */
    l2tab = l2start + l2_table_offset(vpt_start);
    l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*l2tab);
    l1tab += l1_table_offset(vpt_start);
    for ( count = 0; count < nr_pt_pages; count++ )
    {
        *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
        page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
        if ( count == 0 )
        {
            page->u.inuse.type_info &= ~PGT_type_mask;
            page->u.inuse.type_info |= PGT_l2_page_table;

            /*
             * No longer writable: decrement the type_count.
             * Installed as CR3: increment both the ref_count and type_count.
             * Net: just increment the ref_count.
             */
            get_page(page, d); /* an extra ref because of readable mapping */

            /* Get another ref to L2 page so that it can be pinned. */
            if ( !get_page_and_type(page, d, PGT_l2_page_table) )
                BUG();
            set_bit(_PGT_pinned, &page->u.inuse.type_info);
        }
        else
        {
            page->u.inuse.type_info &= ~PGT_type_mask;
            page->u.inuse.type_info |= PGT_l1_page_table;
            page->u.inuse.type_info |=
                ((dsi.v_start>>L2_PAGETABLE_SHIFT)+(count-1))<<PGT_va_shift;

            /*
             * No longer writable: decrement the type_count.
             * This is an L1 page, installed in a validated L2 page:
             * increment both the ref_count and type_count.
             * Net: just increment the ref_count.
             */
            get_page(page, d); /* an extra ref because of readable mapping */
        }
        if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
            l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*++l2tab);
    }

#elif defined(__x86_64__)

    /* Overlap with Xen protected area? */
    if ( (dsi.v_start < HYPERVISOR_VIRT_END) &&
         (v_end > HYPERVISOR_VIRT_START) )
    {
        printk("DOM0 image overlaps with Xen private area.\n");
        return -EINVAL;
    }

    /* WARNING: The new domain must have its 'processor' field filled in! */
    phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
    l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
    memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
        mk_l4_pgentry(__pa(l4start) | __PAGE_HYPERVISOR);
    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
        mk_l4_pgentry(__pa(d->arch.mm_perdomain_l3) | __PAGE_HYPERVISOR);
    ed->arch.guest_table = mk_pagetable(__pa(l4start));

    l4tab += l4_table_offset(dsi.v_start);
    mfn = alloc_start >> PAGE_SHIFT;
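    /*
     * As on i386, map the whole bootstrap region with 4kB mappings, but here
     * L1, L2 and L3 tables are each carved out of mpt_alloc on demand as the
     * corresponding upper-level slot is first touched.
     */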
    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
    {
        if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
        {
            phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
            l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
            clear_page(l1tab);
            if ( count == 0 )
                l1tab += l1_table_offset(dsi.v_start);
            if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) )
            {
                phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
                l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
                clear_page(l2tab);
                if ( count == 0 )
                    l2tab += l2_table_offset(dsi.v_start);
                if ( !((unsigned long)l3tab & (PAGE_SIZE-1)) )
                {
                    phys_to_page(mpt_alloc)->u.inuse.type_info =
                        PGT_l3_page_table;
                    l3start = l3tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
                    clear_page(l3tab);
                    if ( count == 0 )
                        l3tab += l3_table_offset(dsi.v_start);
                    *l4tab++ = mk_l4_pgentry(__pa(l3start) | L4_PROT);
                }
                *l3tab++ = mk_l3_pgentry(__pa(l2start) | L3_PROT);
            }
            *l2tab++ = mk_l2_pgentry(__pa(l1start) | L2_PROT);
        }
        *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);

        page = &frame_table[mfn];
        if ( (page->u.inuse.type_info == 0) &&
             !get_page_and_type(page, d, PGT_writable_page) )
            BUG();

        mfn++;
    }

    /* Pages that are part of page tables must be read only. */
    l4tab = l4start + l4_table_offset(vpt_start);
    l3start = l3tab = l4_pgentry_to_l3(*l4tab);
    l3tab += l3_table_offset(vpt_start);
    l2start = l2tab = l3_pgentry_to_l2(*l3tab);
    l2tab += l2_table_offset(vpt_start);
    l1start = l1tab = l2_pgentry_to_l1(*l2tab);
    l1tab += l1_table_offset(vpt_start);
    for ( count = 0; count < nr_pt_pages; count++ )
    {
        *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
        page = &frame_table[l1_pgentry_to_pfn(*l1tab)];

        /* Read-only mapping + PGC_allocated + page-table page. */
        page->count_info = PGC_allocated | 3;
        page->u.inuse.type_info |= PGT_validated | 1;

        /* Top-level p.t. is pinned. */
        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_l4_page_table )
        {
            page->count_info += 1;
            page->u.inuse.type_info += 1 | PGT_pinned;
        }

        /* Iterate. */
        if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
        {
            if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
            {
                if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
                    l3start = l3tab = l4_pgentry_to_l3(*++l4tab);
                l2start = l2tab = l3_pgentry_to_l2(*l3tab);
            }
            l1start = l1tab = l2_pgentry_to_l1(*l2tab);
        }
    }

#endif /* __x86_64__ */

    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
    d->shared_info->n_vcpu = smp_num_cpus;

    /* Set up shadow and monitor tables. */
    update_pagetables(ed);

    /* Install the new page tables. */
    __cli();
    write_ptbase(ed);

    /* Copy the OS image and free temporary buffer. */
    (void)loadelfimage(image_start);
    init_domheap_pages(
        _image_start, (_image_start+image_len+PAGE_SIZE-1) & PAGE_MASK);

    /* Copy the initial ramdisk and free temporary buffer. */
    if ( initrd_len != 0 )
    {
        memcpy((void *)vinitrd_start, initrd_start, initrd_len);
        init_domheap_pages(
            _initrd_start, (_initrd_start+initrd_len+PAGE_SIZE-1) & PAGE_MASK);
    }

    /* Set up start info area. */
    si = (start_info_t *)vstartinfo_start;
    memset(si, 0, PAGE_SIZE);
    si->nr_pages = nr_pages;
    si->shared_info = virt_to_phys(d->shared_info);
    si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
    si->pt_base = vpt_start;
    si->nr_pt_frames = nr_pt_pages;
    si->mfn_list = vphysmap_start;

    /* Write the phys->machine and machine->phys table entries. */
    for ( pfn = 0; pfn < d->tot_pages; pfn++ )
    {
        mfn = pfn + (alloc_start>>PAGE_SHIFT);
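        /*
         * In debug builds the mapping for pages beyond the bootstrap image
         * is deliberately reversed, presumably so that pfn != mfn and guest
         * code that wrongly assumes an identity (or contiguous) mapping is
         * caught early.
         */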
#ifndef NDEBUG
#define REVERSE_START ((v_end - dsi.v_start) >> PAGE_SHIFT)
        if ( pfn > REVERSE_START )
            mfn = (alloc_end>>PAGE_SHIFT) - (pfn - REVERSE_START);
#endif
        ((u32 *)vphysmap_start)[pfn] = mfn;
        machine_to_phys_mapping[mfn] = pfn;
    }
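    /*
     * Any pages not covered by the initial contiguous allocation are now
     * allocated in further (possibly smaller) extents and entered into the
     * phys->machine and machine->phys tables in the same way.
     */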
    while ( pfn < nr_pages )
    {
        if ( (page = alloc_largest(d, nr_pages - d->tot_pages)) == NULL )
            panic("Not enough RAM for DOM0 reservation.\n");
        while ( pfn < d->tot_pages )
        {
            mfn = page_to_pfn(page);
#ifndef NDEBUG
#define pfn (nr_pages - 1 - (pfn - ((alloc_end - alloc_start) >> PAGE_SHIFT)))
#endif
            ((u32 *)vphysmap_start)[pfn] = mfn;
            machine_to_phys_mapping[mfn] = pfn;
#undef pfn
            page++; pfn++;
        }
    }

    if ( initrd_len != 0 )
    {
        si->mod_start = vinitrd_start;
        si->mod_len = initrd_len;
        printk("Initrd len 0x%lx, start at 0x%p\n",
               si->mod_len, si->mod_start);
    }

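    /*
     * Copy the command line, if any, into the start-info page. It is
     * truncated to 255 characters and always NUL-terminated.
     */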
    dst = (char *)si->cmd_line;
    if ( cmdline != NULL )
    {
        for ( i = 0; i < 255; i++ )
        {
            if ( cmdline[i] == '\0' )
                break;
            *dst++ = cmdline[i];
        }
    }
    *dst = '\0';

    /* Reinstate the caller's page tables. */
    write_ptbase(current);
    __sti();

#if defined(__i386__)
    /* Destroy low mappings - they were only for our convenience. */
    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
        if ( l2_pgentry_val(l2start[i]) & _PAGE_PSE )
            l2start[i] = mk_l2_pgentry(0);
    zap_low_mappings(); /* Do the same for the idle page tables. */
#endif

    /* DOM0 gets access to everything. */
    physdev_init_dom0(d);

    set_bit(DF_CONSTRUCTED, &d->d_flags);

    new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);

    return 0;
}

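/*
 * Minimal sanity check that the DOM0 image is an ELF executable of the
 * right class, byte order and machine type for this build of Xen.
 */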
int elf_sanity_check(Elf_Ehdr *ehdr)
{
    if ( !IS_ELF(*ehdr) ||
#if defined(__i386__)
         (ehdr->e_ident[EI_CLASS] != ELFCLASS32) ||
         (ehdr->e_machine != EM_386) ||
#elif defined(__x86_64__)
         (ehdr->e_ident[EI_CLASS] != ELFCLASS64) ||
         (ehdr->e_machine != EM_X86_64) ||
#endif
         (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) ||
         (ehdr->e_type != ET_EXEC) )
    {
        printk("DOM0 image is not a Xen-compatible Elf image.\n");
        return 0;
    }

    return 1;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */