ia64/xen-unstable

view xen/acm/acm_simple_type_enforcement_hooks.c @ 14858:249de074617f

Added documentation, C bindings, and test for VM_metrics.VCPUs_flags.

Signed-off-by: Ewan Mellor <ewan@xensource.com>
author Ewan Mellor <ewan@xensource.com>
date Fri Apr 13 18:14:06 2007 +0100 (2007-04-13)
parents 4434d1039a65
children a99093e602c6
line source
1 /****************************************************************
2 * acm_simple_type_enforcement_hooks.c
3 *
4 * Copyright (C) 2005 IBM Corporation
5 *
6 * Author:
7 * Reiner Sailer <sailer@watson.ibm.com>
8 *
9 * Contributors:
10 * Stefan Berger <stefanb@watson.ibm.com>
11 * support for network order binary policies
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation, version 2 of the
16 * License.
17 *
18 * sHype Simple Type Enforcement for Xen
19 * STE allows to control which domains can setup sharing
20 * (eventchannels right now) with which other domains. Hooks
21 * are defined and called throughout Xen when domains bind to
22 * shared resources (setup eventchannels) and a domain is allowed
23 * to setup sharing with another domain if and only if both domains
24 * share at least one common type.
25 *
26 */
28 #include <xen/lib.h>
29 #include <asm/types.h>
30 #include <asm/current.h>
31 #include <acm/acm_hooks.h>
32 #include <asm/atomic.h>
33 #include <acm/acm_endian.h>
34 #include <acm/acm_core.h>
/* ssidref assigned to dom0; used to size and seed the minimal startup
 * policy in acm_init_ste_policy() and checked against boot policies in
 * ste_set_policy() */
ssidref_t dom0_ste_ssidref = 0x0001;

/* local cache structures for STE policy */
struct ste_binary_policy ste_bin_pol;
41 static inline int have_common_type (ssidref_t ref1, ssidref_t ref2) {
42 int i;
43 for(i=0; i< ste_bin_pol.max_types; i++)
44 if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] &&
45 ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
46 printkd("%s: common type #%02x.\n", __func__, i);
47 return 1;
48 }
49 return 0;
50 }
52 /* Helper function: return = (subj and obj share a common type) */
53 static int share_common_type(struct domain *subj, struct domain *obj)
54 {
55 ssidref_t ref_s, ref_o;
56 int ret;
58 if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
59 return 0;
60 read_lock(&acm_bin_pol_rwlock);
61 /* lookup the policy-local ssids */
62 ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
63 (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
64 ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
65 (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
66 /* check whether subj and obj share a common ste type */
67 ret = have_common_type(ref_s, ref_o);
68 read_unlock(&acm_bin_pol_rwlock);
69 return ret;
70 }
72 /*
73 * Initializing STE policy (will be filled by policy partition
74 * using setpolicy command)
75 */
76 int acm_init_ste_policy(void)
77 {
78 /* minimal startup policy; policy write-locked already */
79 ste_bin_pol.max_types = 1;
80 ste_bin_pol.max_ssidrefs = 1 + dom0_ste_ssidref;
81 ste_bin_pol.ssidrefs =
82 (domaintype_t *)xmalloc_array(domaintype_t,
83 ste_bin_pol.max_types *
84 ste_bin_pol.max_ssidrefs);
86 if (ste_bin_pol.ssidrefs == NULL)
87 return ACM_INIT_SSID_ERROR;
89 memset(ste_bin_pol.ssidrefs, 0, sizeof(domaintype_t) *
90 ste_bin_pol.max_types *
91 ste_bin_pol.max_ssidrefs);
93 /* initialize state so that dom0 can start up and communicate with itself */
94 ste_bin_pol.ssidrefs[ste_bin_pol.max_types * dom0_ste_ssidref] = 1;
96 /* init stats */
97 atomic_set(&(ste_bin_pol.ec_eval_count), 0);
98 atomic_set(&(ste_bin_pol.ec_denied_count), 0);
99 atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
100 atomic_set(&(ste_bin_pol.gt_eval_count), 0);
101 atomic_set(&(ste_bin_pol.gt_denied_count), 0);
102 atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
103 return ACM_OK;
104 }
107 /* ste initialization function hooks */
108 static int
109 ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
110 {
111 int i;
112 struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);
113 traceprintk("%s.\n", __func__);
115 if (ste_ssidp == NULL)
116 return ACM_INIT_SSID_ERROR;
118 /* get policy-local ssid reference */
119 ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
120 if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
121 (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
122 printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
123 __func__, ste_ssidp->ste_ssidref);
124 xfree(ste_ssidp);
125 return ACM_INIT_SSID_ERROR;
126 }
127 /* clean ste cache */
128 for (i=0; i<ACM_TE_CACHE_SIZE; i++)
129 ste_ssidp->ste_cache[i].valid = ACM_STE_free;
131 (*ste_ssid) = ste_ssidp;
132 printkd("%s: determined ste_ssidref to %x.\n",
133 __func__, ste_ssidp->ste_ssidref);
134 return ACM_OK;
135 }
/* Release the per-domain STE ssid allocated by ste_init_domain_ssid(). */
static void
ste_free_domain_ssid(void *ste_ssid)
{
    traceprintk("%s.\n", __func__);
    xfree(ste_ssid);
}
146 /* dump type enforcement cache; policy read-locked already */
147 static int
148 ste_dump_policy(u8 *buf, u32 buf_size) {
149 struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
150 int ret = 0;
152 if (buf_size < sizeof(struct acm_ste_policy_buffer))
153 return -EINVAL;
155 ste_buf->ste_max_types = cpu_to_be32(ste_bin_pol.max_types);
156 ste_buf->ste_max_ssidrefs = cpu_to_be32(ste_bin_pol.max_ssidrefs);
157 ste_buf->policy_code = cpu_to_be32(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
158 ste_buf->ste_ssid_offset = cpu_to_be32(sizeof(struct acm_ste_policy_buffer));
159 ret = be32_to_cpu(ste_buf->ste_ssid_offset) +
160 sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
162 ret = (ret + 7) & ~7;
164 if (buf_size < ret)
165 return -EINVAL;
167 /* now copy buffer over */
168 arrcpy(buf + be32_to_cpu(ste_buf->ste_ssid_offset),
169 ste_bin_pol.ssidrefs,
170 sizeof(domaintype_t),
171 ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);
173 return ret;
174 }
176 /* ste_init_state is called when a policy is changed to detect violations (return != 0).
177 * from a security point of view, we simulate that all running domains are re-started and
178 * all sharing decisions are replayed to detect violations or current sharing behavior
179 * (right now: event_channels, future: also grant_tables)
180 */
181 static int
182 ste_init_state(struct acm_ste_policy_buffer *ste_buf, domaintype_t *ssidrefs)
183 {
184 int violation = 1;
185 struct ste_ssid *ste_ssid, *ste_rssid;
186 ssidref_t ste_ssidref, ste_rssidref;
187 struct domain *d, *rdom;
188 domid_t rdomid;
189 struct active_grant_entry *act;
190 int port, i;
192 rcu_read_lock(&domlist_read_lock);
193 /* go by domain? or directly by global? event/grant list */
194 /* go through all domains and adjust policy as if this domain was started now */
195 for_each_domain ( d )
196 {
197 struct evtchn *ports;
198 unsigned int bucket;
199 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
200 (struct acm_ssid_domain *)d->ssid);
201 ste_ssidref = ste_ssid->ste_ssidref;
202 traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
203 __func__, d->domain_id, ste_ssidref);
204 /* a) check for event channel conflicts */
205 for (bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++) {
206 spin_lock(&d->evtchn_lock);
207 ports = d->evtchn[bucket];
208 if (ports == NULL) {
209 spin_unlock(&d->evtchn_lock);
210 break;
211 }
213 for (port=0; port < EVTCHNS_PER_BUCKET; port++) {
214 if (ports[port].state == ECS_INTERDOMAIN) {
215 rdom = ports[port].u.interdomain.remote_dom;
216 rdomid = rdom->domain_id;
217 } else {
218 continue; /* port unused */
219 }
221 /* rdom now has remote domain */
222 ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
223 (struct acm_ssid_domain *)(rdom->ssid));
224 ste_rssidref = ste_rssid->ste_ssidref;
225 traceprintk("%s: eventch: domain %x (ssidref %x) --> "
226 "domain %x (rssidref %x) used (port %x).\n",
227 __func__, d->domain_id, ste_ssidref,
228 rdom->domain_id, ste_rssidref, port);
229 /* check whether on subj->ssid, obj->ssid share a common type*/
230 if (!have_common_type(ste_ssidref, ste_rssidref)) {
231 printkd("%s: Policy violation in event channel domain "
232 "%x -> domain %x.\n",
233 __func__, d->domain_id, rdomid);
234 spin_unlock(&d->evtchn_lock);
235 goto out;
236 }
237 }
238 spin_unlock(&d->evtchn_lock);
239 }
241 /* b) check for grant table conflicts on shared pages */
242 spin_lock(&d->grant_table->lock);
243 for ( i = 0; i < nr_active_grant_frames(d->grant_table); i++ ) {
244 #define APP (PAGE_SIZE / sizeof(struct active_grant_entry))
245 act = &d->grant_table->active[i/APP][i%APP];
246 if ( act->pin != 0 ) {
247 printkd("%s: grant dom (%hu) SHARED (%d) pin (%d) "
248 "dom:(%hu) frame:(%lx)\n",
249 __func__, d->domain_id, i, act->pin,
250 act->domid, (unsigned long)act->frame);
251 rdomid = act->domid;
252 if ((rdom = rcu_lock_domain_by_id(rdomid)) == NULL) {
253 spin_unlock(&d->grant_table->lock);
254 printkd("%s: domain not found ERROR!\n", __func__);
255 goto out;
256 }
257 /* rdom now has remote domain */
258 ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
259 (struct acm_ssid_domain *)(rdom->ssid));
260 ste_rssidref = ste_rssid->ste_ssidref;
261 rcu_unlock_domain(rdom);
262 if (!have_common_type(ste_ssidref, ste_rssidref)) {
263 spin_unlock(&d->grant_table->lock);
264 printkd("%s: Policy violation in grant table "
265 "sharing domain %x -> domain %x.\n",
266 __func__, d->domain_id, rdomid);
267 goto out;
268 }
269 }
270 }
271 spin_unlock(&d->grant_table->lock);
272 }
273 violation = 0;
274 out:
275 rcu_read_unlock(&domlist_read_lock);
276 return violation;
277 /* returning "violation != 0" means that existing sharing between domains would not
278 * have been allowed if the new policy had been enforced before the sharing; for ste,
279 * this means that there are at least 2 domains that have established sharing through
280 * event-channels or grant-tables but these two domains don't have no longer a common
281 * type in their typesets referenced by their ssidrefs */
282 }
284 /* set new policy; policy write-locked already */
285 static int
286 ste_set_policy(u8 *buf, u32 buf_size, int is_bootpolicy)
287 {
288 struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
289 void *ssidrefsbuf;
290 struct ste_ssid *ste_ssid;
291 struct domain *d;
292 int i;
294 if (buf_size < sizeof(struct acm_ste_policy_buffer))
295 return -EINVAL;
297 /* Convert endianess of policy */
298 ste_buf->policy_code = be32_to_cpu(ste_buf->policy_code);
299 ste_buf->policy_version = be32_to_cpu(ste_buf->policy_version);
300 ste_buf->ste_max_types = be32_to_cpu(ste_buf->ste_max_types);
301 ste_buf->ste_max_ssidrefs = be32_to_cpu(ste_buf->ste_max_ssidrefs);
302 ste_buf->ste_ssid_offset = be32_to_cpu(ste_buf->ste_ssid_offset);
304 /* policy type and version checks */
305 if ((ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
306 (ste_buf->policy_version != ACM_STE_VERSION))
307 return -EINVAL;
309 /* 1. create and copy-in new ssidrefs buffer */
310 ssidrefsbuf = xmalloc_array(u8, sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
311 if (ssidrefsbuf == NULL) {
312 return -ENOMEM;
313 }
314 if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
315 goto error_free;
317 /* during boot dom0_chwall_ssidref is set */
318 if (is_bootpolicy && (dom0_ste_ssidref >= ste_buf->ste_max_ssidrefs)) {
319 goto error_free;
320 }
322 arrcpy(ssidrefsbuf,
323 buf + ste_buf->ste_ssid_offset,
324 sizeof(domaintype_t),
325 ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
327 /* 2. now re-calculate sharing decisions based on running domains;
328 * this can fail if new policy is conflicting with sharing of running domains
329 * now: reject violating new policy; future: adjust sharing through revoking sharing */
330 if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
331 printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
332 goto error_free; /* new policy conflicts with sharing of running domains */
333 }
334 /* 3. replace old policy (activate new policy) */
335 ste_bin_pol.max_types = ste_buf->ste_max_types;
336 ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
337 xfree(ste_bin_pol.ssidrefs);
338 ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;
340 /* clear all ste caches */
341 rcu_read_lock(&domlist_read_lock);
342 for_each_domain ( d ) {
343 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
344 (struct acm_ssid_domain *)(d)->ssid);
345 for (i=0; i<ACM_TE_CACHE_SIZE; i++)
346 ste_ssid->ste_cache[i].valid = ACM_STE_free;
347 }
348 rcu_read_unlock(&domlist_read_lock);
349 return ACM_OK;
351 error_free:
352 printk("%s: ERROR setting policy.\n", __func__);
353 xfree(ssidrefsbuf);
354 return -EFAULT;
355 }
357 static int
358 ste_dump_stats(u8 *buf, u16 buf_len)
359 {
360 struct acm_ste_stats_buffer stats;
362 /* now send the hook counts to user space */
363 stats.ec_eval_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_eval_count));
364 stats.gt_eval_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_eval_count));
365 stats.ec_denied_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_denied_count));
366 stats.gt_denied_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_denied_count));
367 stats.ec_cachehit_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_cachehit_count));
368 stats.gt_cachehit_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_cachehit_count));
370 if (buf_len < sizeof(struct acm_ste_stats_buffer))
371 return -ENOMEM;
373 memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
374 return sizeof(struct acm_ste_stats_buffer);
375 }
377 static int
378 ste_dump_ssid_types(ssidref_t ssidref, u8 *buf, u16 len)
379 {
380 int i;
382 /* fill in buffer */
383 if (ste_bin_pol.max_types > len)
384 return -EFAULT;
386 if (ssidref >= ste_bin_pol.max_ssidrefs)
387 return -EFAULT;
389 /* read types for chwall ssidref */
390 for(i=0; i< ste_bin_pol.max_types; i++) {
391 if (ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i])
392 buf[i] = 1;
393 else
394 buf[i] = 0;
395 }
396 return ste_bin_pol.max_types;
397 }
399 /* we need to go through this before calling the hooks,
400 * returns 1 == cache hit */
401 static int inline
402 check_cache(struct domain *dom, domid_t rdom) {
403 struct ste_ssid *ste_ssid;
404 int i;
406 printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);
408 if (dom->ssid == NULL)
409 return 0;
410 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
411 (struct acm_ssid_domain *)(dom->ssid));
413 for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
414 if ((ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
415 (ste_ssid->ste_cache[i].id == rdom)) {
416 printkd("cache hit (entry %x, id= %x!\n", i, ste_ssid->ste_cache[i].id);
417 return 1;
418 }
419 }
420 return 0;
421 }
424 /* we only get here if there is NO entry yet; no duplication check! */
425 static void inline
426 cache_result(struct domain *subj, struct domain *obj) {
427 struct ste_ssid *ste_ssid;
428 int i;
429 printkd("caching from doms: %x --> %x.\n", subj->domain_id, obj->domain_id);
430 if (subj->ssid == NULL)
431 return;
432 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
433 (struct acm_ssid_domain *)(subj)->ssid);
434 for(i=0; i< ACM_TE_CACHE_SIZE; i++)
435 if (ste_ssid->ste_cache[i].valid == ACM_STE_free)
436 break;
437 if (i< ACM_TE_CACHE_SIZE) {
438 ste_ssid->ste_cache[i].valid = ACM_STE_valid;
439 ste_ssid->ste_cache[i].id = obj->domain_id;
440 } else
441 printk ("Cache of dom %x is full!\n", subj->domain_id);
442 }
444 /* deletes entries for domain 'id' from all caches (re-use) */
445 static void inline
446 clean_id_from_cache(domid_t id)
447 {
448 struct ste_ssid *ste_ssid;
449 int i;
450 struct domain *d;
451 struct acm_ssid_domain *ssid;
453 printkd("deleting cache for dom %x.\n", id);
454 rcu_read_lock(&domlist_read_lock);
455 /* look through caches of all domains */
456 for_each_domain ( d ) {
457 ssid = (struct acm_ssid_domain *)(d->ssid);
459 if (ssid == NULL)
460 continue; /* hanging domain structure, no ssid any more ... */
461 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssid);
462 if (!ste_ssid) {
463 printk("%s: deleting ID from cache ERROR (no ste_ssid)!\n",
464 __func__);
465 goto out;
466 }
467 for (i=0; i<ACM_TE_CACHE_SIZE; i++)
468 if ((ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
469 (ste_ssid->ste_cache[i].id == id))
470 ste_ssid->ste_cache[i].valid = ACM_STE_free;
471 }
472 out:
473 rcu_read_unlock(&domlist_read_lock);
474 }
476 /***************************
477 * Authorization functions
478 **************************/
479 static int
480 ste_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
481 {
482 /* check for ssidref in range for policy */
483 ssidref_t ste_ssidref;
484 traceprintk("%s.\n", __func__);
486 read_lock(&acm_bin_pol_rwlock);
487 ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
488 if (ste_ssidref == ACM_DEFAULT_LOCAL_SSID) {
489 printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", __func__);
490 read_unlock(&acm_bin_pol_rwlock);
491 return ACM_ACCESS_DENIED; /* catching and indicating config error */
492 }
493 if (ste_ssidref >= ste_bin_pol.max_ssidrefs) {
494 printk("%s: ERROR ste_ssidref > max(%x).\n",
495 __func__, ste_bin_pol.max_ssidrefs-1);
496 read_unlock(&acm_bin_pol_rwlock);
497 return ACM_ACCESS_DENIED;
498 }
499 read_unlock(&acm_bin_pol_rwlock);
500 return ACM_ACCESS_PERMITTED;
501 }
/* Hook: runs after a domain is destroyed; subject_ssid is unused here. */
static void
ste_post_domain_destroy(void *subject_ssid, domid_t id)
{
    /* clean all cache entries for destroyed domain (might be re-used) */
    clean_id_from_cache(id);
}
510 /* -------- EVENTCHANNEL OPERATIONS -----------*/
511 static int
512 ste_pre_eventchannel_unbound(domid_t id1, domid_t id2) {
513 struct domain *subj, *obj;
514 int ret;
515 traceprintk("%s: dom%x-->dom%x.\n", __func__,
516 (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
517 (id2 == DOMID_SELF) ? current->domain->domain_id : id2);
519 if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
520 if (id2 == DOMID_SELF) id2 = current->domain->domain_id;
522 subj = rcu_lock_domain_by_id(id1);
523 obj = rcu_lock_domain_by_id(id2);
524 if ((subj == NULL) || (obj == NULL)) {
525 ret = ACM_ACCESS_DENIED;
526 goto out;
527 }
528 /* cache check late */
529 if (check_cache(subj, obj->domain_id)) {
530 atomic_inc(&ste_bin_pol.ec_cachehit_count);
531 ret = ACM_ACCESS_PERMITTED;
532 goto out;
533 }
534 atomic_inc(&ste_bin_pol.ec_eval_count);
536 if (share_common_type(subj, obj)) {
537 cache_result(subj, obj);
538 ret = ACM_ACCESS_PERMITTED;
539 } else {
540 atomic_inc(&ste_bin_pol.ec_denied_count);
541 ret = ACM_ACCESS_DENIED;
542 }
543 out:
544 if (obj != NULL)
545 rcu_unlock_domain(obj);
546 if (subj != NULL)
547 rcu_unlock_domain(subj);
548 return ret;
549 }
551 static int
552 ste_pre_eventchannel_interdomain(domid_t id)
553 {
554 struct domain *subj=NULL, *obj=NULL;
555 int ret;
557 traceprintk("%s: dom%x-->dom%x.\n", __func__,
558 current->domain->domain_id,
559 (id == DOMID_SELF) ? current->domain->domain_id : id);
561 /* following is a bit longer but ensures that we
562 * "put" only domains that we where "find"-ing
563 */
564 if (id == DOMID_SELF) id = current->domain->domain_id;
566 subj = current->domain;
567 obj = rcu_lock_domain_by_id(id);
568 if (obj == NULL) {
569 ret = ACM_ACCESS_DENIED;
570 goto out;
571 }
573 /* cache check late, but evtchn is not on performance critical path */
574 if (check_cache(subj, obj->domain_id)) {
575 atomic_inc(&ste_bin_pol.ec_cachehit_count);
576 ret = ACM_ACCESS_PERMITTED;
577 goto out;
578 }
580 atomic_inc(&ste_bin_pol.ec_eval_count);
582 if (share_common_type(subj, obj)) {
583 cache_result(subj, obj);
584 ret = ACM_ACCESS_PERMITTED;
585 } else {
586 atomic_inc(&ste_bin_pol.ec_denied_count);
587 ret = ACM_ACCESS_DENIED;
588 }
589 out:
590 if (obj != NULL)
591 rcu_unlock_domain(obj);
592 return ret;
593 }
595 /* -------- SHARED MEMORY OPERATIONS -----------*/
597 static int
598 ste_pre_grant_map_ref (domid_t id) {
599 struct domain *obj, *subj;
600 int ret;
601 traceprintk("%s: dom%x-->dom%x.\n", __func__,
602 current->domain->domain_id, id);
604 if (check_cache(current->domain, id)) {
605 atomic_inc(&ste_bin_pol.gt_cachehit_count);
606 return ACM_ACCESS_PERMITTED;
607 }
608 atomic_inc(&ste_bin_pol.gt_eval_count);
609 subj = current->domain;
610 obj = rcu_lock_domain_by_id(id);
612 if (share_common_type(subj, obj)) {
613 cache_result(subj, obj);
614 ret = ACM_ACCESS_PERMITTED;
615 } else {
616 atomic_inc(&ste_bin_pol.gt_denied_count);
617 printkd("%s: ACCESS DENIED!\n", __func__);
618 ret = ACM_ACCESS_DENIED;
619 }
620 if (obj != NULL)
621 rcu_unlock_domain(obj);
622 return ret;
623 }
626 /* since setting up grant tables involves some implicit information
627 flow from the creating domain to the domain that is setup, we
628 check types in addition to the general authorization */
629 static int
630 ste_pre_grant_setup (domid_t id) {
631 struct domain *obj, *subj;
632 int ret;
633 traceprintk("%s: dom%x-->dom%x.\n", __func__,
634 current->domain->domain_id, id);
636 if (check_cache(current->domain, id)) {
637 atomic_inc(&ste_bin_pol.gt_cachehit_count);
638 return ACM_ACCESS_PERMITTED;
639 }
640 atomic_inc(&ste_bin_pol.gt_eval_count);
641 /* a) check authorization (eventually use specific capabilities) */
642 if (!IS_PRIV(current->domain)) {
643 printk("%s: Grant table management authorization denied ERROR!\n", __func__);
644 return ACM_ACCESS_DENIED;
645 }
646 /* b) check types */
647 subj = current->domain;
648 obj = rcu_lock_domain_by_id(id);
650 if (share_common_type(subj, obj)) {
651 cache_result(subj, obj);
652 ret = ACM_ACCESS_PERMITTED;
653 } else {
654 atomic_inc(&ste_bin_pol.gt_denied_count);
655 ret = ACM_ACCESS_DENIED;
656 }
657 if (obj != NULL)
658 rcu_unlock_domain(obj);
659 return ret;
660 }
662 /* -------- DOMAIN-Requested Decision hooks -----------*/
664 static int
665 ste_sharing(ssidref_t ssidref1, ssidref_t ssidref2) {
666 if (have_common_type (
667 GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref1),
668 GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref2)
669 ))
670 return ACM_ACCESS_PERMITTED;
671 else
672 return ACM_ACCESS_DENIED;
673 }
/* now define the hook structure similarly to LSM */
/* STE hook table registered with the ACM framework; hooks left NULL
 * are not provided by this policy. */
struct acm_operations acm_simple_type_enforcement_ops = {

    /* policy management services */
    .init_domain_ssid = ste_init_domain_ssid,
    .free_domain_ssid = ste_free_domain_ssid,
    .dump_binary_policy = ste_dump_policy,
    .set_binary_policy = ste_set_policy,
    .dump_statistics = ste_dump_stats,
    .dump_ssid_types = ste_dump_ssid_types,

    /* domain management control hooks */
    .pre_domain_create = ste_pre_domain_create,
    .post_domain_create = NULL,
    .fail_domain_create = NULL,
    .post_domain_destroy = ste_post_domain_destroy,

    /* event channel control hooks */
    .pre_eventchannel_unbound = ste_pre_eventchannel_unbound,
    .fail_eventchannel_unbound = NULL,
    .pre_eventchannel_interdomain = ste_pre_eventchannel_interdomain,
    .fail_eventchannel_interdomain = NULL,

    /* grant table control hooks */
    .pre_grant_map_ref = ste_pre_grant_map_ref,
    .fail_grant_map_ref = NULL,
    .pre_grant_setup = ste_pre_grant_setup,
    .fail_grant_setup = NULL,
    .sharing = ste_sharing,
};
707 /*
708 * Local variables:
709 * mode: C
710 * c-set-style: "BSD"
711 * c-basic-offset: 4
712 * tab-width: 4
713 * indent-tabs-mode: nil
714 * End:
715 */