ia64/xen-unstable

view xen/common/domain.c @ 17965:14fd83fe71c3

Add facility to get notification of domain suspend by event channel.
This event channel will be notified when the domain transitions to the
suspended state, which can be much faster than raising VIRQ_DOM_EXC
and waiting for the notification to be propagated via xenstore.

No attempt is made here to prevent multiple subscribers (last one
wins), or to detect that the subscriber has gone away. Userspace tools
should take care.

Signed-off-by: Brendan Cully <brendan@cs.ubc.ca>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jul 04 12:00:24 2008 +0100 (2008-07-04)
parents 3a40a6997cc0
children 9353f9931d96
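
The suspend event channel described above is registered from dom0 via the
companion XEN_DOMCTL_subscribe domctl. As a rough sketch of the toolstack
side (the wrapper name below is hypothetical, and the body assumes libxc's
internal DECLARE_DOMCTL/do_domctl helpers; 'port' must already be a valid
local port in dom0's event-channel space):

/* Hypothetical libxc-style wrapper -- illustrative only. */
static int subscribe_for_suspend(int xc_handle, domid_t domid,
                                 evtchn_port_t port)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_subscribe;
    domctl.domain = domid;
    domctl.u.subscribe.port = port;  /* dom0-local port; last subscriber wins */

    return do_domctl(xc_handle, &domctl);
}

Once the domain finishes entering SHUTDOWN_suspend, __domain_finalise_shutdown()
below signals that port directly instead of raising VIRQ_DOM_EXC and waiting
for the news to propagate through xenstore.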

/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/compat.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <xen/rcupdate.h>
#include <asm/debugger.h>
#include <public/sched.h>
#include <public/vcpu.h>
#include <xsm/xsm.h>

/* Linux config option: propagated to domain0 */
/* xen_processor_pmbits: xen control Cx, Px, ... */
unsigned int xen_processor_pmbits = 0;

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static unsigned int opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);

enum cpufreq_controller cpufreq_controller;
static void __init setup_cpufreq_option(char *str)
{
    if ( !strcmp(str, "dom0-kernel") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_dom0_kernel;
        opt_dom0_vcpus_pin = 1;
    }
    else if ( !strcmp(str, "xen") )
    {
        xen_processor_pmbits |= XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_none;
    }
}
custom_param("cpufreq", setup_cpufreq_option);

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
DEFINE_RCU_READ_LOCK(domlist_read_lock);

#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

int current_domain_id(void)
{
    return current->domain->domain_id;
}

static struct domain *alloc_domain_struct(void)
{
    return xmalloc(struct domain);
}

static void free_domain_struct(struct domain *d)
{
    xfree(d);
}
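
/*
 * Caller must hold d->shutdown_lock.  Marks the domain fully shut down once
 * every VCPU has been paused for shutdown, then notifies the tools: via the
 * subscribed suspend event channel for SHUTDOWN_suspend, or by raising
 * VIRQ_DOM_EXC in dom0 otherwise.
 */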
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;
    if ( d->shutdown_code == SHUTDOWN_suspend
         && d->suspend_evtchn > 0 )
    {
        evtchn_set_pending(dom0->vcpu[0], d->suspend_evtchn);
    }
    else
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
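
/*
 * If the domain has begun shutting down, pause this VCPU for shutdown (its
 * deferral, if any, is over) and check whether that completes the shutdown.
 */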
static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}

struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    v->runstate.state_entry_time = NOW();

    if ( !is_idle_domain(d) )
    {
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    return v;
}

struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    if ( (v = idle_vcpu[cpu_id]) != NULL )
        return v;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}
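
/*
 * Construct a new domain.  init_status records which subsystems have been
 * initialised so that the 'fail' path below tears down exactly those and
 * nothing more.  DOMCRF_dummy returns a barely-initialised structure that is
 * never linked into domain_list.
 */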
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
{
    struct domain *d, **pd;
    enum { INIT_xsm = 1u<<0, INIT_rangeset = 1u<<1, INIT_evtchn = 1u<<2,
           INIT_gnttab = 1u<<3, INIT_arch = 1u<<4 };
    int init_status = 0;

    if ( (d = alloc_domain_struct()) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;

    if ( xsm_alloc_security_domain(d) != 0 )
        goto fail;
    init_status |= INIT_xsm;

    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->domain_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->shutdown_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    if ( (domid == 0) && opt_dom0_vcpus_pin )
        d->is_pinned = 1;

    if ( domcr_flags & DOMCRF_dummy )
        return d;

    rangeset_domain_initialise(d);
    init_status |= INIT_rangeset;

    if ( !is_idle_domain(d) )
    {
        if ( xsm_domain_create(d, ssidref) != 0 )
            goto fail;

        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        if ( evtchn_init(d) != 0 )
            goto fail;
        init_status |= INIT_evtchn;

        if ( grant_table_create(d) != 0 )
            goto fail;
        init_status |= INIT_gnttab;
    }

    if ( arch_domain_create(d, domcr_flags) != 0 )
        goto fail;
    init_status |= INIT_arch;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail;

    if ( sched_init_domain(d) != 0 )
        goto fail;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail:
    d->is_dying = DOMDYING_dead;
    atomic_set(&d->refcnt, DOMAIN_DESTROYED);
    if ( init_status & INIT_arch )
        arch_domain_destroy(d);
    if ( init_status & INIT_gnttab )
        grant_table_destroy(d);
    if ( init_status & INIT_evtchn )
        evtchn_destroy(d);
    if ( init_status & INIT_rangeset )
        rangeset_domain_destroy(d);
    if ( init_status & INIT_xsm )
        xsm_free_security_domain(d);
    free_domain_struct(d);
    return NULL;
}
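
/*
 * get_domain_by_id() returns with a reference held (release it with
 * put_domain()); rcu_lock_domain_by_id() instead returns with the RCU read
 * lock still held on success, and the caller must drop it with
 * rcu_unlock_domain().
 */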
struct domain *get_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}


struct domain *rcu_lock_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
            return d;
    }

    rcu_read_unlock(&domlist_read_lock);

    return NULL;
}
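
/*
 * Tear the domain down in stages: DOMDYING_alive -> dying -> dead.
 * domain_relinquish_resources() may return -EAGAIN, in which case the
 * toolstack is expected to retry the domctl and the DOMDYING_dying case
 * continues where it left off.
 */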
int domain_kill(struct domain *d)
{
    int rc = 0;

    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        page_scrub_kick();
        if ( rc != 0 )
        {
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}

void __domain_crash(struct domain *d)
{
    if ( d->is_shutting_down )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}


void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    /*
     * Flush multicall state before dying if a multicall is in progress.
     * This shouldn't be necessary, but some architectures are calling
     * domain_crash_synchronous() when they really shouldn't (i.e., from
     * within hypercall context).
     */
    if ( this_cpu(mc_state).flags != 0 )
    {
        dprintk(XENLOG_ERR,
                "FIXME: synchronous domain crash during a multicall!\n");
        this_cpu(mc_state).flags = 0;
    }

    vcpu_end_shutdown_deferral(current);

    for ( ; ; )
        do_softirq();
}

void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;
    d->shutdown_code = reason;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( v->defer_shutdown )
            continue;
        vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}

void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
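
/*
 * Shutdown deferral: a VCPU in the middle of a critical hypercall sets
 * defer_shutdown so that domain_shutdown() leaves it running.  The smp_mb()
 * below pairs with the one in domain_shutdown(): either the deferring VCPU
 * sees is_shutting_down and pauses itself via vcpu_check_shutdown(), or
 * domain_shutdown() sees the deferral flag and skips the VCPU.
 */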
int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    if ( v->defer_shutdown )
        return 1;

    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);

    return v->defer_shutdown;
}

void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}

void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}

/* Complete domain destroy after RCU readers are not holding old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
    }

    rangeset_domain_destroy(d);

    grant_table_destroy(d);

    arch_domain_destroy(d);

    sched_destroy_domain(d);

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    if ( d->target != NULL )
        put_domain(d->target);

    xsm_free_security_domain(d);
    free_domain_struct(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

/* Release resources belonging to task @p. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!d->is_dying);

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    spin_lock(&domlist_update_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    rcu_assign_pointer(*pd, d->next_in_list);
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    rcu_assign_pointer(*pd, d->next_in_hashbucket);
    spin_unlock(&domlist_update_lock);

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}
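
/*
 * vcpu_pause() does not return until the VCPU has been descheduled
 * (vcpu_sleep_sync()); the _nosync variant only raises the pause count and
 * flags the VCPU to sleep, so it may still be running when the call returns.
 */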
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}

void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}

void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}

void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    set_bit(_VPF_down, &v->pause_flags);

    v->fpu_initialised = 0;
    v->fpu_dirtied = 0;
    v->is_polling = 0;
    v->is_initialised = 0;
    v->nmi_pending = 0;
    v->nmi_masked = 0;
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(v->domain);
    vcpu_unpause(v);
}

long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        domain_lock(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        domain_unlock(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !v->is_initialised )
            return -EINVAL;

        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);

        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
        if ( v != current )
            return -EINVAL;

        stop_timer(&v->singleshot_timer);

        break;

    case VCPUOP_send_nmi:
        if ( !guest_handle_is_null(arg) )
            return -EINVAL;

        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);

        break;

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}

long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */