ia64/xen-unstable: xen/arch/x86/vmx_vmcs.c @ 6707:3bde4219c681

manual merge

author    iap10@freefall.cl.cam.ac.uk
date      Thu Sep 08 17:40:37 2005 +0000 (2005-09-08)
parents   dd668f7527cb e3fd0fa58364
children  aa0990ef260f

/*
 * vmx_vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <public/io/ioreq.h>
#if CONFIG_PAGING_LEVELS >= 4
#include <asm/shadow_64.h>
#endif
#ifdef CONFIG_VMX

/* VMCS region size, read from MSR_IA32_VMX_BASIC_MSR by alloc_vmcs(). */
static u32 vmcs_size;
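
/*
 * Allocate a zeroed VMCS region from the Xen heap and stamp it with the
 * processor's VMCS revision identifier. Both the required size and the
 * revision identifier come from MSR_IA32_VMX_BASIC_MSR, as the VMCS
 * layout is implementation-specific.
 */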
struct vmcs_struct *alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;
    u32 vmx_msr_low, vmx_msr_high;

    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
    vmcs_size = vmx_msr_high & 0x1fff;
    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
    memset((char *)vmcs, 0, vmcs_size); /* the VMCS must start out zeroed;
                                           don't remove this */

    vmcs->vmcs_revision_id = vmx_msr_low;
    return vmcs;
}
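
/* Free a VMCS region previously returned by alloc_vmcs(). */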
void free_vmcs(struct vmcs_struct *vmcs)
{
    int order;

    order = get_order_from_bytes(vmcs_size);
    free_xenheap_pages(vmcs, order);
}
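
/*
 * Program the VM-execution, VM-exit and VM-entry control fields of the
 * current VMCS, and install freshly allocated I/O bitmaps so that all
 * port accesses, except the debug port, cause VM exits.
 */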
static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
{
    int error = 0;
    void *io_bitmap_a;
    void *io_bitmap_b;

    error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                       MONITOR_PIN_BASED_EXEC_CONTROLS);

    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                       MONITOR_CPU_BASED_EXEC_CONTROLS);

    error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);

    error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);

    /* The I/O bitmaps are architecturally 4KB each, so use 0x1000
       rather than PAGE_SIZE. */
    io_bitmap_a = (void *)alloc_xenheap_pages(get_order_from_bytes(0x1000));
    io_bitmap_b = (void *)alloc_xenheap_pages(get_order_from_bytes(0x1000));
    memset(io_bitmap_a, 0xff, 0x1000);
    /* don't intercept accesses to the debug port */
    clear_bit(PC_DEBUG_PORT, io_bitmap_a);
    memset(io_bitmap_b, 0xff, 0x1000);

    error |= __vmwrite(IO_BITMAP_A, (u64)virt_to_phys(io_bitmap_a));
    error |= __vmwrite(IO_BITMAP_B, (u64)virt_to_phys(io_bitmap_b));

    arch_vmx->io_bitmap_a = io_bitmap_a;
    arch_vmx->io_bitmap_b = io_bitmap_b;

    return error;
}

#define GUEST_SEGMENT_LIMIT  0xffffffff
#define HOST_SEGMENT_LIMIT   0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif

    /* control registers */
    unsigned long cr3;
    unsigned long cr0;
    unsigned long cr4;
    unsigned long dr7;
};

#define round_pgdown(_p) ((_p) & PAGE_MASK) /* copied from domain.c */
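
/*
 * Locate the shared page via the E820_SHARED_PAGE entry of the e820 map
 * (entry count arrives in regs->ecx, map address in regs->edi), map it
 * into the hypervisor, and unmask the I/O packet event channel.
 * Returns 0 on success, -1 on failure.
 */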
int vmx_setup_platform(struct vcpu *v, struct cpu_user_regs *regs)
{
    int i;
    unsigned int n;
    unsigned long *p, mpfn, offset, addr;
    struct e820entry *e820p;
    unsigned long gpfn = 0;

    local_flush_tlb_pge();
    regs->ebx = 0;   /* Linux expects ebx to be 0 for the boot processor */

    n = regs->ecx;
    if (n > 32) {
        VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n);
        return -1;
    }

    addr = regs->edi;
    offset = (addr & ~PAGE_MASK);
    addr = round_pgdown(addr);

    mpfn = get_mfn_from_pfn(addr >> PAGE_SHIFT);
    p = map_domain_page(mpfn);

    e820p = (struct e820entry *)((unsigned long)p + offset);

#ifndef NDEBUG
    print_e820_memory_map(e820p, n);
#endif

    for ( i = 0; i < n; i++ )
    {
        if ( e820p[i].type == E820_SHARED_PAGE )
        {
            gpfn = (e820p[i].addr >> PAGE_SHIFT);
            break;
        }
    }

    if ( gpfn == 0 )
    {
        unmap_domain_page(p);
        return -1;
    }

    unmap_domain_page(p);

    /* Initialise the shared page. */
    mpfn = get_mfn_from_pfn(gpfn);
    p = map_domain_page(mpfn);
    v->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;

    VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(v->domain));

    clear_bit(iopacket_port(v->domain),
              &v->domain->shared_info->evtchn_mask[0]);

    return 0;
}
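
/*
 * Record the host's IDT, GDT and TR state in the current VMCS so that
 * it can be restored on VM exit.
 */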
void vmx_set_host_env(struct vcpu *v)
{
    unsigned int tr, cpu, error = 0;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;

    cpu = smp_processor_id();
    __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
    host_env.idtr_limit = desc.size;
    host_env.idtr_base = desc.address;
    error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);

    __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;
    error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];
    error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
}
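
/*
 * First-launch setup for a VMX vcpu: set up the platform shared page and
 * the host state, clear the guest LDT and TR, and point GUEST_CR3,
 * HOST_CR3 and HOST_RSP at this vcpu's page tables and stack.
 */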
void vmx_do_launch(struct vcpu *v)
{
    /* Update CR3, GDT, LDT, TR */
    unsigned int error = 0;
    unsigned long pfn = 0;
    struct pfn_info *page;
    struct cpu_user_regs *regs = guest_cpu_user_regs();

    vmx_stts();

    page = (struct pfn_info *) alloc_domheap_page(NULL);
    pfn = (unsigned long) (page - frame_table);

    vmx_setup_platform(v, regs);

    vmx_set_host_env(v);

    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    error |= __vmwrite(GUEST_LDTR_BASE, 0);
    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);

    error |= __vmwrite(GUEST_TR_BASE, 0);
    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);

    __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.guest_table));
    __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
    __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());

    v->arch.schedule_tail = arch_vmx_do_resume;
}

/*
 * Initially set up the same environment as the host.
 */
static inline int
construct_init_vmcs_guest(struct cpu_user_regs *regs,
                          struct vcpu_guest_context *ctxt,
                          struct host_execution_env *host_env)
{
    int error = 0;
    union vmcs_arbytes arbytes;
    unsigned long dr7;
    unsigned long eflags, shadow_cr;

    /* MSR */
    error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);

    error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
    /* interrupt */
    error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
    /* mask: all CR0/CR4 bits are host-owned */
    error |= __vmwrite(CR0_GUEST_HOST_MASK, -1UL);
    error |= __vmwrite(CR4_GUEST_HOST_MASK, -1UL);

    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    /* TSC */
    error |= __vmwrite(TSC_OFFSET, 0);
    error |= __vmwrite(CR3_TARGET_COUNT, 0);

    /* Guest Selectors */
    error |= __vmwrite(GUEST_CS_SELECTOR, regs->cs);
    error |= __vmwrite(GUEST_ES_SELECTOR, regs->es);
    error |= __vmwrite(GUEST_SS_SELECTOR, regs->ss);
    error |= __vmwrite(GUEST_DS_SELECTOR, regs->ds);
    error |= __vmwrite(GUEST_FS_SELECTOR, regs->fs);
    error |= __vmwrite(GUEST_GS_SELECTOR, regs->gs);

    /* Guest segment limits */
    error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);

    error |= __vmwrite(GUEST_IDTR_LIMIT, host_env->idtr_limit);

    /* AR bytes */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type 3: read/write data, accessed */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */

    error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* type 0xb: execute/read code, accessed */
    error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    error |= __vmwrite(GUEST_GDTR_BASE, regs->edx);
    regs->edx = 0;
    error |= __vmwrite(GUEST_GDTR_LIMIT, regs->eax);
    regs->eax = 0;

    arbytes.fields.s = 0;                   /* not a code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);

    error |= __vmwrite(GUEST_CR0, host_env->cr0); /* same CR0 */

    /* Initially PG is not set in the CR0 read shadow */
    shadow_cr = host_env->cr0;
    shadow_cr &= ~X86_CR0_PG;
    error |= __vmwrite(CR0_READ_SHADOW, shadow_cr);
    /* CR3 is set in vmx_final_setup_guest */
#ifdef __x86_64__
    error |= __vmwrite(GUEST_CR4, host_env->cr4 & ~X86_CR4_PSE);
#else
    error |= __vmwrite(GUEST_CR4, host_env->cr4);
#endif
    shadow_cr = host_env->cr4;

#ifdef __x86_64__
    shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
#else
    shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE);
#endif
    error |= __vmwrite(CR4_READ_SHADOW, shadow_cr);

    error |= __vmwrite(GUEST_ES_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_CS_BASE, host_env->cs_base);
    error |= __vmwrite(GUEST_SS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_DS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_FS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);

    error |= __vmwrite(GUEST_RSP, regs->esp);
    error |= __vmwrite(GUEST_RIP, regs->eip);

    eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear reserved-0 bits */
    eflags |= VMCS_EFLAGS_RESERVED_1;                /* set reserved-1 bits */

    error |= __vmwrite(GUEST_RFLAGS, eflags);

    error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
    error |= __vmwrite(GUEST_DR7, dr7);
    error |= __vmwrite(VMCS_LINK_POINTER, 0xffffffff);
    error |= __vmwrite(VMCS_LINK_POINTER_HIGH, 0xffffffff);

    return error;
}
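
/*
 * Fill in the host-state area of the current VMCS from the hypervisor's
 * live selectors and control registers, recording them in host_env as a
 * side effect.
 */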
static inline int construct_vmcs_host(struct host_execution_env *host_env)
{
    int error = 0;
    unsigned long crn;

    /* Host Selectors */
    host_env->ds_selector = __HYPERVISOR_DS;
    error |= __vmwrite(HOST_ES_SELECTOR, host_env->ds_selector);
    error |= __vmwrite(HOST_SS_SELECTOR, host_env->ds_selector);
    error |= __vmwrite(HOST_DS_SELECTOR, host_env->ds_selector);
#if defined (__i386__)
    error |= __vmwrite(HOST_FS_SELECTOR, host_env->ds_selector);
    error |= __vmwrite(HOST_GS_SELECTOR, host_env->ds_selector);
    error |= __vmwrite(HOST_FS_BASE, host_env->ds_base);
    error |= __vmwrite(HOST_GS_BASE, host_env->ds_base);

#else
    rdmsrl(MSR_FS_BASE, host_env->fs_base);
    rdmsrl(MSR_GS_BASE, host_env->gs_base);
    error |= __vmwrite(HOST_FS_BASE, host_env->fs_base);
    error |= __vmwrite(HOST_GS_BASE, host_env->gs_base);

#endif
    host_env->cs_selector = __HYPERVISOR_CS;
    error |= __vmwrite(HOST_CS_SELECTOR, host_env->cs_selector);

    host_env->ds_base = 0;
    host_env->cs_base = 0;

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
    host_env->cr0 = crn;
    error |= __vmwrite(HOST_CR0, crn); /* same CR0 */

    /* CR3 is set in vmx_final_setup_hostos */
    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : );
    host_env->cr4 = crn;
    error |= __vmwrite(HOST_CR4, crn);

    error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
#ifdef __x86_64__
    /* TBD: support cr8 for 64-bit guest */
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
    __vmwrite(TPR_THRESHOLD, 0);
    __vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);
#endif

    return error;
}

/*
 * This needs to be extended to support full virtualization.
 * The variable use_host_env indicates whether the new VMCS should use
 * the same setup as the host (xenolinux).
 */
int construct_vmcs(struct arch_vmx_struct *arch_vmx,
                   struct cpu_user_regs *regs,
                   struct vcpu_guest_context *ctxt,
                   int use_host_env)
{
    int error;
    u64 vmcs_phys_ptr;

    struct host_execution_env host_env;

    if (use_host_env != VMCS_USE_HOST_ENV)
        return -EINVAL;

    memset(&host_env, 0, sizeof(struct host_execution_env));

    vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);

    if ((error = __vmpclear(vmcs_phys_ptr))) {
        printk("construct_vmcs: VMCLEAR failed\n");
        return -EINVAL;
    }
    if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
        printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n",
               (unsigned long) vmcs_phys_ptr);
        return -EINVAL;
    }
    if ((error = construct_vmcs_controls(arch_vmx))) {
        printk("construct_vmcs: construct_vmcs_controls failed\n");
        return -EINVAL;
    }
    /* host selectors */
    if ((error = construct_vmcs_host(&host_env))) {
        printk("construct_vmcs: construct_vmcs_host failed\n");
        return -EINVAL;
    }
    /* guest selectors */
    if ((error = construct_init_vmcs_guest(regs, ctxt, &host_env))) {
        printk("construct_vmcs: construct_init_vmcs_guest failed\n");
        return -EINVAL;
    }

    if ((error = __vmwrite(EXCEPTION_BITMAP,
                           MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
        printk("construct_vmcs: setting exception bitmap failed\n");
        return -EINVAL;
    }

    if (regs->eflags & EF_TF)
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
    else
        __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);

    return 0;
}

/*
 * Modify guest eflags and exception bitmap for gdb.
 */
int modify_vmcs(struct arch_vmx_struct *arch_vmx,
                struct cpu_user_regs *regs)
{
    int error;
    u64 vmcs_phys_ptr, old, old_phys_ptr;
    vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);

    old_phys_ptr = virt_to_phys(&old);
    __vmptrst(old_phys_ptr);
    if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
        printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n",
               (unsigned long) vmcs_phys_ptr);
        return -EINVAL;
    }
    load_cpu_user_regs(regs);

    __vmptrld(old_phys_ptr);

    return 0;
}
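
/*
 * Make the given VMCS current on this processor with VMPTRLD, keeping
 * the ARCH_VMX_VMCS_LOADED flag in sync with the outcome.
 */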
int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
{
    int error;

    if ((error = __vmptrld(phys_ptr))) {
        clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
        return error;
    }
    set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
    return 0;
}
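
/*
 * Save the current-VMCS pointer with VMPTRST and mark the VMCS as no
 * longer loaded.
 */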
int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
{
    /* take the current VMCS */
    __vmptrst(phys_ptr);
    clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
    return 0;
}
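
/* VMLAUNCH failed: report the VM-instruction error field and bail out. */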
void vm_launch_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_launch_fail> error code %lx\n", error);
    __vmx_bug(guest_cpu_user_regs());
}
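
/* VMRESUME failed: report the VM-instruction error field and bail out. */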
void vm_resume_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_resume_fail> error code %lx\n", error);
    __vmx_bug(guest_cpu_user_regs());
}
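
/* Scheduler tail: make this vcpu's VMCS current again and resume the guest. */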
void arch_vmx_do_resume(struct vcpu *v)
{
    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);

    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
    vmx_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_resume);
}
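
/* Scheduler tail for the first run: make the VMCS current and launch the guest. */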
void arch_vmx_do_launch(struct vcpu *v)
{
    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);

    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
    vmx_do_launch(v);
    reset_stack_and_jump(vmx_asm_do_launch);
}
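
/*
 * Scheduler tail used when the vcpu may have moved to a different
 * physical CPU (note the vmx_set_host_env() call): reload the VMCS,
 * refresh the per-CPU host state, then resume the guest.
 */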
void arch_vmx_do_relaunch(struct vcpu *v)
{
    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);

    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
    vmx_do_resume(v);
    vmx_set_host_env(v);
    v->arch.schedule_tail = arch_vmx_do_resume;

    reset_stack_and_jump(vmx_asm_do_relaunch);
}

#endif /* CONFIG_VMX */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */