ia64/xen-unstable: xen/common/xenoprof.c @ 15647:cc48264ed647

summary:  Merge
author:   Tim Deegan <Tim.Deegan@xensource.com>
date:     Tue Jul 24 14:53:06 2007 +0100
parents:  3c28bc13a3f8
children: 96f64f4c42f0
/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 *   (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */
#ifndef COMPAT
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <public/xenoprof.h>
#include <xen/paging.h>

/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state */
static DEFINE_SPINLOCK(xenoprof_lock);

static struct domain *active_domains[MAX_OPROF_DOMAINS];
static int active_ready[MAX_OPROF_DOMAINS];
static unsigned int adomains;

static struct domain *passive_domains[MAX_OPROF_DOMAINS];
static unsigned int pdomains;

static unsigned int activated;
static struct domain *xenoprof_primary_profiler;
static int xenoprof_state = XENOPROF_IDLE;
static unsigned long backtrace_depth;

static u64 total_samples;
static u64 invalid_buffer_samples;
static u64 corrupted_buffer_samples;
static u64 lost_samples;
static u64 active_samples;
static u64 passive_samples;
static u64 idle_samples;
static u64 others_samples;
int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

static int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

static int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}
static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    xenoprof_buf_t *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - Unexpected "
               "Xenoprof NULL pointer \n");
        return;
    }

    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            xenoprof_buf(d, buf, event_head) = 0;
            xenoprof_buf(d, buf, event_tail) = 0;
        }
    }
}
static int
share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
{
    int i;

    /* Check if previous page owner has released the page. */
    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
            gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%x\n",
                     mfn + i, page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
    }

    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);

    return 0;
}
static void
unshare_xenoprof_page_with_guest(struct xenoprof *x)
{
    int i, npages = x->npages;
    unsigned long mfn = virt_to_mfn(x->rawbuf);

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}

static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = IS_COMPAT(is_passive ? dom0 : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - bufsize) / i ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages));
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}
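
/*
 * Illustrative sizing example (the struct sizes below are hypothetical,
 * not taken from the headers): with 4 vCPUs, PAGE_SIZE = 4096 and
 * MAX_OPROF_SHARED_PAGES = 32, each per-vCPU buffer may use at most
 * (32 * 4096) / 4 = 32768 bytes.  Assuming sizeof(struct xenoprof_buf) = 64
 * and sizeof(struct event_log) = 16 purely for illustration, max_samples is
 * capped at ((32768 - 64) / 16) + 1 = 2045, i.e. a caller requesting more
 * samples is silently clamped rather than refused.
 */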
void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x);
    d->xenoprof = NULL;
}
static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}

static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}
static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(x);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}
static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = get_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}

static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = get_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    ret = share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);
    if ( ret < 0 )
    {
        put_domain(d);
        return ret;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(current->domain) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}
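
/*
 * Note that the passive domain's buffer pages are shared with
 * current->domain (the primary profiler), not with the passive domain
 * itself: a passive guest runs no profiling daemon, so the profiler is
 * expected to map and drain those buffers on its behalf.
 */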
/* Get space in the buffer */
static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size)
{
    int head, tail;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);

    return ((tail > head) ? 0 : size) + tail - head - 1;
}
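
/*
 * The buffer is a single-producer ring: head is the next slot to write,
 * tail the next slot to read, and one slot is always kept empty so a full
 * ring can be told apart from an empty one.  Worked example with size = 16:
 *   head = 5, tail = 3  ->  16 + 3 - 5 - 1 = 13 free slots
 *   head = 2, tail = 10 ->   0 + 10 - 2 - 1 =  7 free slots
 */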
/* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
                               unsigned long eip, int mode, int event)
{
    int head, tail, size;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);
    size = xenoprof_buf(d, buf, event_size);

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return 0;
    }

    if ( xenoprof_buf_space(d, buf, size) > 0 )
    {
        xenoprof_buf(d, buf, event_log[head].eip) = eip;
        xenoprof_buf(d, buf, event_log[head].mode) = mode;
        xenoprof_buf(d, buf, event_log[head].event) = event;
        head++;
        if ( head >= size )
            head = 0;

        xenoprof_buf(d, buf, event_head) = head;
    }
    else
    {
        xenoprof_buf(d, buf, lost_samples)++;
        lost_samples++;
        return 0;
    }

    return 1;
}
int xenoprof_add_trace(struct domain *d, struct vcpu *vcpu,
                       unsigned long eip, int mode)
{
    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    /* Do not accidentally write an escape code due to a broken frame. */
    if ( eip == XENOPROF_ESCAPE_CODE )
    {
        invalid_buffer_samples++;
        return 0;
    }

    return xenoprof_add_sample(d, buf, eip, mode, 0);
}
void xenoprof_log_event(struct vcpu *vcpu,
                        struct cpu_user_regs * regs, unsigned long eip,
                        int mode, int event)
{
    struct domain *d = vcpu->domain;
    struct xenoprof_vcpu *v;
    xenoprof_buf_t *buf;

    total_samples++;

    /* Ignore samples of un-monitored domains. */
    if ( !is_profiled(d) )
    {
        others_samples++;
        return;
    }

    v = &d->xenoprof->vcpu[vcpu->vcpu_id];
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = v->buffer;

    /* Provide backtrace if requested. */
    if ( backtrace_depth > 0 )
    {
        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
                                  XENOPROF_TRACE_BEGIN) )
        {
            xenoprof_buf(d, buf, lost_samples)++;
            lost_samples++;
            return;
        }
    }

    if ( xenoprof_add_sample(d, buf, eip, mode, event) )
    {
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            xenoprof_buf(d, buf, user_samples)++;
        else if ( mode == 1 )
            xenoprof_buf(d, buf, kernel_samples)++;
        else
            xenoprof_buf(d, buf, xen_samples)++;
    }

    if ( backtrace_depth > 0 )
        xenoprof_backtrace(d, vcpu, regs, backtrace_depth, mode);
}
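
/*
 * With backtrace_depth > 0, each hit is therefore expected to appear in
 * the buffer as: an escape record (eip == XENOPROF_ESCAPE_CODE, event ==
 * XENOPROF_TRACE_BEGIN), then the real PC sample, then up to
 * backtrace_depth caller addresses added through xenoprof_add_trace() by
 * the architecture's backtrace walker.  A consumer that sees the escape
 * record can treat the entries that follow as one call chain.
 */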
static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   xenoprof_init.cpu_type)) )
        return ret;

    xenoprof_init.is_primary =
        ((xenoprof_primary_profiler == d) ||
         ((xenoprof_primary_profiler == NULL) && (d->domain_id == 0)));
    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return (copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0);
}
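
/*
 * Only one domain may drive the hardware counters at a time: dom0, or a
 * domain that has already claimed the role, becomes the primary profiler
 * on XENOPROF_init, and the privileged operations in do_xenoprof_op()
 * below are restricted to it via NONPRIV_OP().
 */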
#endif /* !COMPAT */

static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * We allocate xenoprof struct and buffers only at first time
     * get_buffer is called. Memory is then kept until domain is destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
        if ( ret < 0 )
            return ret;
    }

    ret = share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
    if ( ret < 0 )
        return ret;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
        return -EFAULT;

    return 0;
}
#define NONPRIV_OP(op) ( (op == XENOPROF_init) \
                      || (op == XENOPROF_enable_virq) \
                      || (op == XENOPROF_disable_virq) \
                      || (op == XENOPROF_get_buffer))
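
/*
 * State machine driven by the primary profiler.  The ordering below is
 * inferred from the xenoprof_state checks in the handler and sketches the
 * sequence a typical OProfile-based toolstack would issue:
 *
 *   XENOPROF_init / get_buffer                        (any profiled domain)
 *   XENOPROF_set_active / set_passive                 (IDLE)
 *   XENOPROF_reserve_counters                         (IDLE -> COUNTERS_RESERVED)
 *   XENOPROF_counter, XENOPROF_setup_events           (COUNTERS_RESERVED -> READY)
 *   XENOPROF_enable_virq, XENOPROF_start              (READY -> PROFILING)
 *   XENOPROF_stop                                     (PROFILING -> READY)
 *   XENOPROF_disable_virq, XENOPROF_release_counters  (back to IDLE)
 *   XENOPROF_shutdown                                 (IDLE)
 */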
int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        printk("xenoprof: invalid operation %d for domain %d\n",
               op, current->domain->domain_id);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        break;

    case XENOPROF_get_buffer:
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
    {
        reset_active_list();
        ret = 0;
        break;
    }
    case XENOPROF_reset_passive_list:
    {
        reset_passive_list();
        ret = 0;
        break;
    }
    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }
    case XENOPROF_set_passive:
    {
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;
    }
    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }

        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;
        if ( current->domain == xenoprof_primary_profiler )
        {
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();
        xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(x);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_IDLE;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_IDLE )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            backtrace_depth = 0;
            ret = 0;
        }
        break;

    case XENOPROF_set_backtrace:
        ret = 0;
        if ( !xenoprof_backtrace_supported() )
            ret = -EINVAL;
        else if ( copy_from_guest(&backtrace_depth, arg, 1) )
            ret = -EFAULT;
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}
#if defined(CONFIG_COMPAT) && !defined(COMPAT)
#include "compat/xenoprof.c"
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */