xen/acm/acm_simple_type_enforcement_hooks.c @ 15640:f45c9f122e0e (ia64/xen-unstable)

[ACM] Check boundary conditions of passed parameters before accessing
an array with them.
Signed-off-by: Stefan Berger <stefanb@us.ibm.com>

author   kfraser@localhost.localdomain
date     Mon Jul 23 09:56:49 2007 +0100
parents  dae6a2790f6b
children bf512fde6667
/****************************************************************
 * acm_simple_type_enforcement_hooks.c
 *
 * Copyright (C) 2005 IBM Corporation
 *
 * Author:
 * Reiner Sailer <sailer@watson.ibm.com>
 *
 * Contributors:
 * Stefan Berger <stefanb@watson.ibm.com>
 *    support for network order binary policies
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * sHype Simple Type Enforcement for Xen
 *    STE controls which domains can set up sharing
 *    (event channels right now) with which other domains. Hooks
 *    are defined and called throughout Xen when domains bind to
 *    shared resources (set up event channels); a domain is allowed
 *    to set up sharing with another domain if and only if both domains
 *    share at least one common type.
 *
 */
#include <xen/lib.h>
#include <asm/types.h>
#include <asm/current.h>
#include <acm/acm_hooks.h>
#include <asm/atomic.h>
#include <acm/acm_endian.h>
#include <acm/acm_core.h>

ssidref_t dom0_ste_ssidref = 0x0001;

/* local cache structures for STE policy */
struct ste_binary_policy ste_bin_pol;

static inline int have_common_type (ssidref_t ref1, ssidref_t ref2) {
    int i;

    if ( ref1 >= 0 && ref1 < ste_bin_pol.max_ssidrefs &&
         ref2 >= 0 && ref2 < ste_bin_pol.max_ssidrefs )
    {
        for ( i = 0; i < ste_bin_pol.max_types; i++ )
            if ( ste_bin_pol.ssidrefs[ref1 * ste_bin_pol.max_types + i] &&
                 ste_bin_pol.ssidrefs[ref2 * ste_bin_pol.max_types + i] )
            {
                printkd("%s: common type #%02x.\n", __func__, i);
                return 1;
            }
    }
    return 0;
}
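
/*
 * Example (hypothetical values): ste_bin_pol.ssidrefs is a
 * max_ssidrefs x max_types matrix of per-type flags stored row-major, so
 * the type set of ssidref r occupies
 * ssidrefs[r * max_types .. r * max_types + max_types - 1].
 * With max_types == 3 and rows {1,0,1} for ref1 and {0,0,1} for ref2,
 * both rows have type #2 set, so have_common_type(ref1, ref2) returns 1.
 */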

/* Helper function: return = (subj and obj share a common type) */
static int share_common_type(struct domain *subj, struct domain *obj)
{
    ssidref_t ref_s, ref_o;
    int ret;

    if ( (subj == NULL) || (obj == NULL) ||
         (subj->ssid == NULL) || (obj->ssid == NULL) )
        return 0;

    read_lock(&acm_bin_pol_rwlock);

    /* lookup the policy-local ssids */
    ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
             (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
    ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
             (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;

    /* check whether subj and obj share a common ste type */
    ret = have_common_type(ref_s, ref_o);

    read_unlock(&acm_bin_pol_rwlock);

    return ret;
}

/*
 * Initializing STE policy (will be filled by policy partition
 * using setpolicy command)
 */
int acm_init_ste_policy(void)
{
    /* minimal startup policy; policy write-locked already */
    ste_bin_pol.max_types = 1;
    ste_bin_pol.max_ssidrefs = 1 + dom0_ste_ssidref;
    ste_bin_pol.ssidrefs =
        (domaintype_t *)xmalloc_array(domaintype_t,
                                      ste_bin_pol.max_types *
                                      ste_bin_pol.max_ssidrefs);

    if ( ste_bin_pol.ssidrefs == NULL )
        return ACM_INIT_SSID_ERROR;

    memset(ste_bin_pol.ssidrefs, 0, sizeof(domaintype_t) *
                                    ste_bin_pol.max_types *
                                    ste_bin_pol.max_ssidrefs);

    /* initialize state so that dom0 can start up and communicate with itself */
    ste_bin_pol.ssidrefs[ste_bin_pol.max_types * dom0_ste_ssidref] = 1;

    /* init stats */
    atomic_set(&(ste_bin_pol.ec_eval_count), 0);
    atomic_set(&(ste_bin_pol.ec_denied_count), 0);
    atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
    atomic_set(&(ste_bin_pol.gt_eval_count), 0);
    atomic_set(&(ste_bin_pol.gt_denied_count), 0);
    atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);

    return ACM_OK;
}
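
/*
 * Worked example: with the built-in dom0_ste_ssidref of 0x0001, the startup
 * policy above has max_types == 1 and max_ssidrefs == 2, i.e. a 2x1 matrix
 * { {0}, {1} }: row 0 (ssidref 0) carries no type, row 1 (dom0) carries the
 * single type, so only dom0 can share with itself.  This (1, 2) shape is
 * also what ste_is_default_policy() tests for below.
 */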

/* ste initialization function hooks */
static int
ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
{
    int i;
    struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);

    if ( ste_ssidp == NULL )
        return ACM_INIT_SSID_ERROR;

    /* get policy-local ssid reference */
    ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                         ssidref);

    if ( ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs )
    {
        printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
                __func__, ste_ssidp->ste_ssidref);
        xfree(ste_ssidp);
        return ACM_INIT_SSID_ERROR;
    }

    /* clean ste cache */
    for ( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
        ste_ssidp->ste_cache[i].valid = ACM_STE_free;

    (*ste_ssid) = ste_ssidp;
    printkd("%s: determined ste_ssidref to %x.\n",
            __func__, ste_ssidp->ste_ssidref);

    return ACM_OK;
}


static void
ste_free_domain_ssid(void *ste_ssid)
{
    xfree(ste_ssid);
    return;
}

/* dump type enforcement policy; policy read-locked already */
static int
ste_dump_policy(u8 *buf, u32 buf_size) {
    struct acm_ste_policy_buffer *ste_buf =
        (struct acm_ste_policy_buffer *)buf;
    int ret = 0;

    if ( buf_size < sizeof(struct acm_ste_policy_buffer) )
        return -EINVAL;

    ste_buf->ste_max_types = cpu_to_be32(ste_bin_pol.max_types);
    ste_buf->ste_max_ssidrefs = cpu_to_be32(ste_bin_pol.max_ssidrefs);
    ste_buf->policy_code = cpu_to_be32(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
    ste_buf->ste_ssid_offset =
        cpu_to_be32(sizeof(struct acm_ste_policy_buffer));
    ret = be32_to_cpu(ste_buf->ste_ssid_offset) +
        sizeof(domaintype_t) * ste_bin_pol.max_ssidrefs * ste_bin_pol.max_types;

    ret = (ret + 7) & ~7;

    if ( buf_size < ret )
        return -EINVAL;

    /* now copy buffer over */
    arrcpy(buf + be32_to_cpu(ste_buf->ste_ssid_offset),
           ste_bin_pol.ssidrefs,
           sizeof(domaintype_t),
           ste_bin_pol.max_ssidrefs * ste_bin_pol.max_types);

    return ret;
}
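
/*
 * Layout of the dumped blob (as produced above): an acm_ste_policy_buffer
 * header in network byte order, followed at ste_ssid_offset by the
 * max_ssidrefs * max_types ssidrefs array.  The returned length is rounded
 * up to the next multiple of 8 by "(ret + 7) & ~7"; e.g. a raw length of
 * 26 bytes is reported (and checked against buf_size) as 32.
 */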

/*
 * ste_init_state is called when a policy is changed to detect violations
 * (return != 0). From a security point of view, we simulate that all
 * running domains are re-started and all sharing decisions are replayed
 * to detect violations of current sharing behavior (right now:
 * event_channels, future: also grant_tables)
 */
static int
ste_init_state(struct acm_sized_buffer *errors)
{
    int violation = 1;
    struct ste_ssid *ste_ssid, *ste_rssid;
    ssidref_t ste_ssidref, ste_rssidref;
    struct domain *d, *rdom;
    domid_t rdomid;
    struct active_grant_entry *act;
    int port, i;

    rcu_read_lock(&domlist_read_lock);
    read_lock(&ssid_list_rwlock);

    /* go through all domains and adjust policy as if each domain was
       started now */
    for_each_domain ( d )
    {
        struct evtchn *ports;
        unsigned int bucket;

        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                             (struct acm_ssid_domain *)d->ssid);
        ste_ssidref = ste_ssid->ste_ssidref;
        traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
                    __func__, d->domain_id, ste_ssidref);

        /* a) check for event channel conflicts */
        for ( bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++ )
        {
            spin_lock(&d->evtchn_lock);
            ports = d->evtchn[bucket];
            if ( ports == NULL )
            {
                spin_unlock(&d->evtchn_lock);
                break;
            }

            for ( port = 0; port < EVTCHNS_PER_BUCKET; port++ )
            {
                if ( ports[port].state == ECS_INTERDOMAIN )
                {
                    rdom = ports[port].u.interdomain.remote_dom;
                    rdomid = rdom->domain_id;
                } else {
                    continue; /* port unused */
                }

                /* rdom now has remote domain */
                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                      (struct acm_ssid_domain *)(rdom->ssid));
                ste_rssidref = ste_rssid->ste_ssidref;
                traceprintk("%s: eventch: domain %x (ssidref %x) --> "
                            "domain %x (rssidref %x) used (port %x).\n",
                            __func__, d->domain_id, ste_ssidref,
                            rdom->domain_id, ste_rssidref, port);

                /* check whether subj->ssid and obj->ssid share a common type */
                if ( !have_common_type(ste_ssidref, ste_rssidref) )
                {
                    printkd("%s: Policy violation in event channel domain "
                            "%x -> domain %x.\n",
                            __func__, d->domain_id, rdomid);
                    spin_unlock(&d->evtchn_lock);

                    acm_array_append_tuple(errors,
                                           ACM_EVTCHN_SHARING_VIOLATION,
                                           d->domain_id << 16 | rdomid);
                    goto out;
                }
            }
            spin_unlock(&d->evtchn_lock);
        }

        /* b) check for grant table conflicts on shared pages */
        spin_lock(&d->grant_table->lock);
        for ( i = 0; i < nr_active_grant_frames(d->grant_table); i++ )
        {
#define APP (PAGE_SIZE / sizeof(struct active_grant_entry))
            act = &d->grant_table->active[i/APP][i%APP];
            if ( act->pin != 0 ) {
                printkd("%s: grant dom (%hu) SHARED (%d) pin (%d) "
                        "dom:(%hu) frame:(%lx)\n",
                        __func__, d->domain_id, i, act->pin,
                        act->domid, (unsigned long)act->frame);
                rdomid = act->domid;
                if ( (rdom = rcu_lock_domain_by_id(rdomid)) == NULL )
                {
                    spin_unlock(&d->grant_table->lock);
                    printkd("%s: domain not found ERROR!\n", __func__);

                    acm_array_append_tuple(errors,
                                           ACM_DOMAIN_LOOKUP,
                                           rdomid);
                    goto out;
                }
                /* rdom now has remote domain */
                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                      (struct acm_ssid_domain *)(rdom->ssid));
                ste_rssidref = ste_rssid->ste_ssidref;
                rcu_unlock_domain(rdom);
                if ( !have_common_type(ste_ssidref, ste_rssidref) )
                {
                    spin_unlock(&d->grant_table->lock);
                    printkd("%s: Policy violation in grant table "
                            "sharing domain %x -> domain %x.\n",
                            __func__, d->domain_id, rdomid);

                    acm_array_append_tuple(errors,
                                           ACM_GNTTAB_SHARING_VIOLATION,
                                           d->domain_id << 16 | rdomid);
                    goto out;
                }
            }
        }
        spin_unlock(&d->grant_table->lock);
    }
    violation = 0;
 out:
    read_unlock(&ssid_list_rwlock);
    rcu_read_unlock(&domlist_read_lock);
    return violation;
    /*
     * Returning "violation != 0" means that existing sharing between domains
     * would not have been allowed if the new policy had been enforced before
     * the sharing; for ste, this means that there are at least 2 domains
     * that have established sharing through event-channels or grant-tables
     * but these two domains no longer have a common type in their
     * typesets referenced by their ssidrefs.
     */
}
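
/*
 * Note on the error tuples appended above: a sharing violation is reported
 * as a single 32-bit value packing both parties as
 * (d->domain_id << 16) | rdomid (domain IDs fit in 16 bits), e.g. a
 * conflict between domain 5 and domain 3 is encoded as 0x00050003; the
 * ACM_DOMAIN_LOOKUP error carries only the unresolvable rdomid.
 */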

/*
 * Call ste_init_state with the current policy.
 */
int
do_ste_init_state_curr(struct acm_sized_buffer *errors)
{
    return ste_init_state(errors);
}

/* set new policy; policy write-locked already */
static int
_ste_update_policy(u8 *buf, u32 buf_size, int test_only,
                   struct acm_sized_buffer *errors)
{
    int rc = -EFAULT;
    struct acm_ste_policy_buffer *ste_buf =
        (struct acm_ste_policy_buffer *)buf;
    void *ssidrefsbuf;
    struct ste_ssid *ste_ssid;
    struct acm_ssid_domain *rawssid;
    int i;

    /* 1. create and copy-in new ssidrefs buffer */
    ssidrefsbuf = xmalloc_array(u8,
                                sizeof(domaintype_t) *
                                ste_buf->ste_max_types *
                                ste_buf->ste_max_ssidrefs);
    if ( ssidrefsbuf == NULL ) {
        return -ENOMEM;
    }
    if ( ste_buf->ste_ssid_offset +
         sizeof(domaintype_t) *
         ste_buf->ste_max_ssidrefs *
         ste_buf->ste_max_types > buf_size )
        goto error_free;

    arrcpy(ssidrefsbuf,
           buf + ste_buf->ste_ssid_offset,
           sizeof(domaintype_t),
           ste_buf->ste_max_ssidrefs * ste_buf->ste_max_types);

    /*
     * 2. in test mode: re-calculate sharing decisions based on running
     *    domains; this can fail if the new policy conflicts with the
     *    sharing of running domains
     *    now: reject a violating new policy; future: adjust sharing by
     *    revoking sharing
     */
    if ( test_only ) {
        /* temporarily replace old policy with new one for the testing */
        struct ste_binary_policy orig_ste_bin_pol = ste_bin_pol;
        ste_bin_pol.max_types = ste_buf->ste_max_types;
        ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
        ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;

        if ( ste_init_state(NULL) )
        {
            /* new policy conflicts with sharing of running domains */
            printk("%s: New policy conflicts with running domains. "
                   "Policy load aborted.\n", __func__);
        } else {
            rc = ACM_OK;
        }
        /* revert changes, no matter whether testing was successful or not */
        ste_bin_pol = orig_ste_bin_pol;
        goto error_free;
    }

    /* 3. replace old policy (activate new policy) */
    ste_bin_pol.max_types = ste_buf->ste_max_types;
    ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
    xfree(ste_bin_pol.ssidrefs);
    ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;

    /* clear all ste caches */
    read_lock(&ssid_list_rwlock);

    for_each_acmssid( rawssid )
    {
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, rawssid);
        for ( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
            ste_ssid->ste_cache[i].valid = ACM_STE_free;
    }

    read_unlock(&ssid_list_rwlock);

    return ACM_OK;

 error_free:
    if ( !test_only )
        printk("%s: ERROR setting policy.\n", __func__);
    xfree(ssidrefsbuf);
    return rc;
}

static int
ste_test_policy(u8 *buf, u32 buf_size, int is_bootpolicy,
                struct acm_sized_buffer *errors)
{
    struct acm_ste_policy_buffer *ste_buf =
        (struct acm_ste_policy_buffer *)buf;

    if ( buf_size < sizeof(struct acm_ste_policy_buffer) )
        return -EINVAL;

    /* Convert endianness of policy header */
    ste_buf->policy_code = be32_to_cpu(ste_buf->policy_code);
    ste_buf->policy_version = be32_to_cpu(ste_buf->policy_version);
    ste_buf->ste_max_types = be32_to_cpu(ste_buf->ste_max_types);
    ste_buf->ste_max_ssidrefs = be32_to_cpu(ste_buf->ste_max_ssidrefs);
    ste_buf->ste_ssid_offset = be32_to_cpu(ste_buf->ste_ssid_offset);

    /* policy type and version checks */
    if ( (ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
         (ste_buf->policy_version != ACM_STE_VERSION) )
        return -EINVAL;

    /* during boot dom0_ste_ssidref is set */
    if ( is_bootpolicy && (dom0_ste_ssidref >= ste_buf->ste_max_ssidrefs) )
        return -EINVAL;

    return _ste_update_policy(buf, buf_size, 1, errors);
}

static int
ste_set_policy(u8 *buf, u32 buf_size)
{
    return _ste_update_policy(buf, buf_size, 0, NULL);
}


static int
ste_dump_stats(u8 *buf, u16 buf_len)
{
    struct acm_ste_stats_buffer stats;

    /* now send the hook counts to user space */
    stats.ec_eval_count =
        cpu_to_be32(atomic_read(&ste_bin_pol.ec_eval_count));
    stats.gt_eval_count =
        cpu_to_be32(atomic_read(&ste_bin_pol.gt_eval_count));
    stats.ec_denied_count =
        cpu_to_be32(atomic_read(&ste_bin_pol.ec_denied_count));
    stats.gt_denied_count =
        cpu_to_be32(atomic_read(&ste_bin_pol.gt_denied_count));
    stats.ec_cachehit_count =
        cpu_to_be32(atomic_read(&ste_bin_pol.ec_cachehit_count));
    stats.gt_cachehit_count =
        cpu_to_be32(atomic_read(&ste_bin_pol.gt_cachehit_count));

    if ( buf_len < sizeof(struct acm_ste_stats_buffer) )
        return -ENOMEM;

    memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));

    return sizeof(struct acm_ste_stats_buffer);
}

static int
ste_dump_ssid_types(ssidref_t ssidref, u8 *buf, u16 len)
{
    int i;

    /* fill in buffer */
    if ( ste_bin_pol.max_types > len )
        return -EFAULT;

    if ( ssidref >= ste_bin_pol.max_ssidrefs )
        return -EFAULT;

    /* read types for ste ssidref */
    for ( i = 0; i < ste_bin_pol.max_types; i++ )
    {
        if ( ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i] )
            buf[i] = 1;
        else
            buf[i] = 0;
    }
    return ste_bin_pol.max_types;
}

/* we need to go through this before calling the hooks;
 * returns 1 == cache hit */
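/*
 * Illustrative flow: ste_pre_grant_map_ref() below first calls
 * check_cache(current->domain, id); on a hit the policy evaluation is
 * skipped and gt_cachehit_count is incremented, on a miss
 * share_common_type() decides and, if permitted, cache_result() records
 * the pair so the next mapping between the same two domains is a cache hit.
 */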
static int inline
check_cache(struct domain *dom, domid_t rdom)
{
    struct ste_ssid *ste_ssid;
    int i;

    printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);

    if ( dom->ssid == NULL )
        return 0;

    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                         (struct acm_ssid_domain *)(dom->ssid));

    for ( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
    {
        if ( (ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
             (ste_ssid->ste_cache[i].id == rdom) )
        {
            printkd("cache hit (entry %x, id=%x)!\n",
                    i,
                    ste_ssid->ste_cache[i].id);
            return 1;
        }
    }
    return 0;
}

/* we only get here if there is NO entry yet; no duplication check! */
static void inline
cache_result(struct domain *subj, struct domain *obj) {
    struct ste_ssid *ste_ssid;
    int i;

    printkd("caching from doms: %x --> %x.\n",
            subj->domain_id, obj->domain_id);

    if ( subj->ssid == NULL )
        return;

    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                         (struct acm_ssid_domain *)(subj)->ssid);

    for ( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
        if ( ste_ssid->ste_cache[i].valid == ACM_STE_free )
            break;

    if ( i < ACM_TE_CACHE_SIZE )
    {
        ste_ssid->ste_cache[i].valid = ACM_STE_valid;
        ste_ssid->ste_cache[i].id = obj->domain_id;
    } else
        printk("Cache of dom %x is full!\n", subj->domain_id);
}

/* deletes entries for domain 'id' from all caches (re-use) */
static void inline
clean_id_from_cache(domid_t id)
{
    struct ste_ssid *ste_ssid;
    int i;
    struct acm_ssid_domain *rawssid;

    printkd("deleting cache for dom %x.\n", id);

    read_lock(&ssid_list_rwlock);
    /* look through caches of all domains */

    for_each_acmssid ( rawssid )
    {
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, rawssid);

        if ( !ste_ssid )
        {
            printk("%s: deleting ID from cache ERROR (no ste_ssid)!\n",
                   __func__);
            goto out;
        }
        for ( i = 0; i < ACM_TE_CACHE_SIZE; i++ )
            if ( (ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
                 (ste_ssid->ste_cache[i].id == id) )
                ste_ssid->ste_cache[i].valid = ACM_STE_free;
    }

 out:
    read_unlock(&ssid_list_rwlock);
}

/***************************
 * Authorization functions
 **************************/
static int
ste_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
{
    /* check for ssidref in range for policy */
    ssidref_t ste_ssidref;

    traceprintk("%s.\n", __func__);

    read_lock(&acm_bin_pol_rwlock);

    ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);

    if ( ste_ssidref >= ste_bin_pol.max_ssidrefs )
    {
        printk("%s: ERROR ste_ssidref > max(%x).\n",
               __func__, ste_bin_pol.max_ssidrefs - 1);
        read_unlock(&acm_bin_pol_rwlock);
        return ACM_ACCESS_DENIED;
    }

    read_unlock(&acm_bin_pol_rwlock);

    return ACM_ACCESS_PERMITTED;
}


static int
ste_domain_create(void *subject_ssid, ssidref_t ssidref, domid_t domid)
{
    return ste_pre_domain_create(subject_ssid, ssidref);
}


static void
ste_domain_destroy(void *subject_ssid, struct domain *d)
{
    /* clean all cache entries for destroyed domain (might be re-used) */
    clean_id_from_cache(d->domain_id);
}

/* -------- EVENTCHANNEL OPERATIONS -----------*/
static int
ste_pre_eventchannel_unbound(domid_t id1, domid_t id2) {
    struct domain *subj, *obj;
    int ret;

    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
                (id2 == DOMID_SELF) ? current->domain->domain_id : id2);

    if ( id1 == DOMID_SELF )
        id1 = current->domain->domain_id;
    if ( id2 == DOMID_SELF )
        id2 = current->domain->domain_id;

    subj = rcu_lock_domain_by_id(id1);
    obj = rcu_lock_domain_by_id(id2);
    if ( (subj == NULL) || (obj == NULL) )
    {
        ret = ACM_ACCESS_DENIED;
        goto out;
    }

    /* cache check late */
    if ( check_cache(subj, obj->domain_id) )
    {
        atomic_inc(&ste_bin_pol.ec_cachehit_count);
        ret = ACM_ACCESS_PERMITTED;
        goto out;
    }
    atomic_inc(&ste_bin_pol.ec_eval_count);

    if ( share_common_type(subj, obj) )
    {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    }
    else
    {
        atomic_inc(&ste_bin_pol.ec_denied_count);
        ret = ACM_ACCESS_DENIED;
    }

 out:
    if ( obj != NULL )
        rcu_unlock_domain(obj);
    if ( subj != NULL )
        rcu_unlock_domain(subj);
    return ret;
}

static int
ste_pre_eventchannel_interdomain(domid_t id)
{
    struct domain *subj = NULL, *obj = NULL;
    int ret;

    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                current->domain->domain_id,
                (id == DOMID_SELF) ? current->domain->domain_id : id);

    /* following is a bit longer but ensures that we
     * "put" only domains that we were "find"-ing
     */
    if ( id == DOMID_SELF )
        id = current->domain->domain_id;

    subj = current->domain;
    obj = rcu_lock_domain_by_id(id);
    if ( obj == NULL )
    {
        ret = ACM_ACCESS_DENIED;
        goto out;
    }

    /* cache check late, but evtchn is not on performance critical path */
    if ( check_cache(subj, obj->domain_id) )
    {
        atomic_inc(&ste_bin_pol.ec_cachehit_count);
        ret = ACM_ACCESS_PERMITTED;
        goto out;
    }

    atomic_inc(&ste_bin_pol.ec_eval_count);

    if ( share_common_type(subj, obj) )
    {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    }
    else
    {
        atomic_inc(&ste_bin_pol.ec_denied_count);
        ret = ACM_ACCESS_DENIED;
    }

 out:
    if ( obj != NULL )
        rcu_unlock_domain(obj);
    return ret;
}

/* -------- SHARED MEMORY OPERATIONS -----------*/

static int
ste_pre_grant_map_ref (domid_t id)
{
    struct domain *obj, *subj;
    int ret;

    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                current->domain->domain_id, id);

    if ( check_cache(current->domain, id) )
    {
        atomic_inc(&ste_bin_pol.gt_cachehit_count);
        return ACM_ACCESS_PERMITTED;
    }
    atomic_inc(&ste_bin_pol.gt_eval_count);
    subj = current->domain;
    obj = rcu_lock_domain_by_id(id);

    if ( share_common_type(subj, obj) )
    {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    }
    else
    {
        atomic_inc(&ste_bin_pol.gt_denied_count);
        printkd("%s: ACCESS DENIED!\n", __func__);
        ret = ACM_ACCESS_DENIED;
    }
    if ( obj != NULL )
        rcu_unlock_domain(obj);
    return ret;
}

/* since setting up grant tables involves some implicit information
   flow from the creating domain to the domain that is set up, we
   check types in addition to the general authorization */
static int
ste_pre_grant_setup (domid_t id)
{
    struct domain *obj, *subj;
    int ret;

    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                current->domain->domain_id, id);

    if ( check_cache(current->domain, id) )
    {
        atomic_inc(&ste_bin_pol.gt_cachehit_count);
        return ACM_ACCESS_PERMITTED;
    }
    atomic_inc(&ste_bin_pol.gt_eval_count);

    /* a) check authorization (eventually use specific capabilities) */
    if ( !IS_PRIV(current->domain) )
    {
        printk("%s: Grant table management authorization denied ERROR!\n",
               __func__);
        return ACM_ACCESS_DENIED;
    }

    /* b) check types */
    subj = current->domain;
    obj = rcu_lock_domain_by_id(id);

    if ( share_common_type(subj, obj) )
    {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    }
    else
    {
        atomic_inc(&ste_bin_pol.gt_denied_count);
        ret = ACM_ACCESS_DENIED;
    }
    if ( obj != NULL )
        rcu_unlock_domain(obj);
    return ret;
}

/* -------- DOMAIN-Requested Decision hooks -----------*/

static int
ste_sharing(ssidref_t ssidref1, ssidref_t ssidref2)
{
    int hct = have_common_type(
        GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref1),
        GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref2));
    return (hct ? ACM_ACCESS_PERMITTED : ACM_ACCESS_DENIED);
}


static int
ste_is_default_policy(void)
{
    return ((ste_bin_pol.max_types == 1) &&
            (ste_bin_pol.max_ssidrefs == 2));
}

/* now define the hook structure similarly to LSM */
struct acm_operations acm_simple_type_enforcement_ops = {

    /* policy management services */
    .init_domain_ssid = ste_init_domain_ssid,
    .free_domain_ssid = ste_free_domain_ssid,
    .dump_binary_policy = ste_dump_policy,
    .test_binary_policy = ste_test_policy,
    .set_binary_policy = ste_set_policy,
    .dump_statistics = ste_dump_stats,
    .dump_ssid_types = ste_dump_ssid_types,

    /* domain management control hooks */
    .domain_create = ste_domain_create,
    .domain_destroy = ste_domain_destroy,

    /* event channel control hooks */
    .pre_eventchannel_unbound = ste_pre_eventchannel_unbound,
    .fail_eventchannel_unbound = NULL,
    .pre_eventchannel_interdomain = ste_pre_eventchannel_interdomain,
    .fail_eventchannel_interdomain = NULL,

    /* grant table control hooks */
    .pre_grant_map_ref = ste_pre_grant_map_ref,
    .fail_grant_map_ref = NULL,
    .pre_grant_setup = ste_pre_grant_setup,
    .fail_grant_setup = NULL,
    .sharing = ste_sharing,

    .is_default_policy = ste_is_default_policy,
};

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */