ia64/xen-unstable

view xen/acm/acm_simple_type_enforcement_hooks.c @ 14058:97826d77bd4d

Use RCU for domain_list and domain_hash.

Signed-off-by: Jose Renato Santos <jsantos@hpl.hp.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Feb 21 16:13:49 2007 +0000 (2007-02-21)
parents c57d80520e7a
children d4cd2c70cdea
line source
1 /****************************************************************
2 * acm_simple_type_enforcement_hooks.c
3 *
4 * Copyright (C) 2005 IBM Corporation
5 *
6 * Author:
7 * Reiner Sailer <sailer@watson.ibm.com>
8 *
9 * Contributors:
10 * Stefan Berger <stefanb@watson.ibm.com>
11 * support for network order binary policies
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation, version 2 of the
16 * License.
17 *
18 * sHype Simple Type Enforcement for Xen
19 * STE allows control over which domains can set up sharing
20 * (eventchannels right now) with which other domains. Hooks
21 * are defined and called throughout Xen when domains bind to
22 * shared resources (setup eventchannels) and a domain is allowed
23 * to setup sharing with another domain if and only if both domains
24 * share at least one common type.
25 *
26 */
28 #include <xen/lib.h>
29 #include <asm/types.h>
30 #include <asm/current.h>
31 #include <acm/acm_hooks.h>
32 #include <asm/atomic.h>
33 #include <acm/acm_endian.h>
35 /* local cache structures for STE policy */
36 struct ste_binary_policy ste_bin_pol;
38 static inline int have_common_type (ssidref_t ref1, ssidref_t ref2) {
39 int i;
40 for(i=0; i< ste_bin_pol.max_types; i++)
41 if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] &&
42 ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
43 printkd("%s: common type #%02x.\n", __func__, i);
44 return 1;
45 }
46 return 0;
47 }
49 /* Helper function: return = (subj and obj share a common type) */
50 static int share_common_type(struct domain *subj, struct domain *obj)
51 {
52 ssidref_t ref_s, ref_o;
53 int ret;
55 if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
56 return 0;
57 read_lock(&acm_bin_pol_rwlock);
58 /* lookup the policy-local ssids */
59 ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
60 (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
61 ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
62 (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
63 /* check whether subj and obj share a common ste type */
64 ret = have_common_type(ref_s, ref_o);
65 read_unlock(&acm_bin_pol_rwlock);
66 return ret;
67 }
69 /*
70 * Initializing STE policy (will be filled by policy partition
71 * using setpolicy command)
72 */
73 int acm_init_ste_policy(void)
74 {
75 /* minimal startup policy; policy write-locked already */
76 ste_bin_pol.max_types = 1;
77 ste_bin_pol.max_ssidrefs = 2;
78 ste_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, 2);
79 memset(ste_bin_pol.ssidrefs, 0, 2);
81 if (ste_bin_pol.ssidrefs == NULL)
82 return ACM_INIT_SSID_ERROR;
84 /* initialize state so that dom0 can start up and communicate with itself */
85 ste_bin_pol.ssidrefs[1] = 1;
87 /* init stats */
88 atomic_set(&(ste_bin_pol.ec_eval_count), 0);
89 atomic_set(&(ste_bin_pol.ec_denied_count), 0);
90 atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
91 atomic_set(&(ste_bin_pol.gt_eval_count), 0);
92 atomic_set(&(ste_bin_pol.gt_denied_count), 0);
93 atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
94 return ACM_OK;
95 }
98 /* ste initialization function hooks */
99 static int
100 ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
101 {
102 int i;
103 struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);
104 traceprintk("%s.\n", __func__);
106 if (ste_ssidp == NULL)
107 return ACM_INIT_SSID_ERROR;
109 /* get policy-local ssid reference */
110 ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
111 if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
112 (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
113 printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
114 __func__, ste_ssidp->ste_ssidref);
115 xfree(ste_ssidp);
116 return ACM_INIT_SSID_ERROR;
117 }
118 /* clean ste cache */
119 for (i=0; i<ACM_TE_CACHE_SIZE; i++)
120 ste_ssidp->ste_cache[i].valid = ACM_STE_free;
122 (*ste_ssid) = ste_ssidp;
123 printkd("%s: determined ste_ssidref to %x.\n",
124 __func__, ste_ssidp->ste_ssidref);
125 return ACM_OK;
126 }
static void
ste_free_domain_ssid(void *ste_ssid)
{
    /* Release the STE part of a domain ssid; xfree(NULL) is a no-op. */
    traceprintk("%s.\n", __func__);
    xfree(ste_ssid);
}
137 /* dump type enforcement cache; policy read-locked already */
138 static int
139 ste_dump_policy(u8 *buf, u32 buf_size) {
140 struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
141 int ret = 0;
143 if (buf_size < sizeof(struct acm_ste_policy_buffer))
144 return -EINVAL;
146 ste_buf->ste_max_types = cpu_to_be32(ste_bin_pol.max_types);
147 ste_buf->ste_max_ssidrefs = cpu_to_be32(ste_bin_pol.max_ssidrefs);
148 ste_buf->policy_code = cpu_to_be32(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
149 ste_buf->ste_ssid_offset = cpu_to_be32(sizeof(struct acm_ste_policy_buffer));
150 ret = be32_to_cpu(ste_buf->ste_ssid_offset) +
151 sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
153 ret = (ret + 7) & ~7;
155 if (buf_size < ret)
156 return -EINVAL;
158 /* now copy buffer over */
159 arrcpy(buf + be32_to_cpu(ste_buf->ste_ssid_offset),
160 ste_bin_pol.ssidrefs,
161 sizeof(domaintype_t),
162 ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);
164 return ret;
165 }
167 /* ste_init_state is called when a policy is changed to detect violations (return != 0).
168 * from a security point of view, we simulate that all running domains are re-started and
169 * all sharing decisions are replayed to detect violations or current sharing behavior
170 * (right now: event_channels, future: also grant_tables)
171 */
172 static int
173 ste_init_state(struct acm_ste_policy_buffer *ste_buf, domaintype_t *ssidrefs)
174 {
175 int violation = 1;
176 struct ste_ssid *ste_ssid, *ste_rssid;
177 ssidref_t ste_ssidref, ste_rssidref;
178 struct domain *d, *rdom;
179 domid_t rdomid;
180 struct grant_entry sha_copy;
181 int port, i;
183 rcu_read_lock(&domlist_read_lock);
184 /* go by domain? or directly by global? event/grant list */
185 /* go through all domains and adjust policy as if this domain was started now */
186 for_each_domain ( d )
187 {
188 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
189 (struct acm_ssid_domain *)d->ssid);
190 ste_ssidref = ste_ssid->ste_ssidref;
191 traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
192 __func__, d->domain_id, ste_ssidref);
193 /* a) check for event channel conflicts */
194 for (port=0; port < NR_EVTCHN_BUCKETS; port++) {
195 spin_lock(&d->evtchn_lock);
196 if (d->evtchn[port] == NULL) {
197 spin_unlock(&d->evtchn_lock);
198 continue;
199 }
200 if (d->evtchn[port]->state == ECS_INTERDOMAIN) {
201 rdom = d->evtchn[port]->u.interdomain.remote_dom;
202 rdomid = rdom->domain_id;
203 /* rdom now has remote domain */
204 ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
205 (struct acm_ssid_domain *)(rdom->ssid));
206 ste_rssidref = ste_rssid->ste_ssidref;
207 } else if (d->evtchn[port]->state == ECS_UNBOUND) {
208 rdomid = d->evtchn[port]->u.unbound.remote_domid;
209 if ((rdom = get_domain_by_id(rdomid)) == NULL) {
210 printk("%s: Error finding domain to id %x!\n", __func__, rdomid);
211 goto out;
212 }
213 /* rdom now has remote domain */
214 ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
215 (struct acm_ssid_domain *)(rdom->ssid));
216 ste_rssidref = ste_rssid->ste_ssidref;
217 put_domain(rdom);
218 } else {
219 spin_unlock(&d->evtchn_lock);
220 continue; /* port unused */
221 }
222 spin_unlock(&d->evtchn_lock);
224 /* rdom now has remote domain */
225 ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
226 (struct acm_ssid_domain *)(rdom->ssid));
227 ste_rssidref = ste_rssid->ste_ssidref;
228 traceprintk("%s: eventch: domain %x (ssidref %x) --> domain %x (rssidref %x) used (port %x).\n",
229 __func__, d->domain_id, ste_ssidref, rdom->domain_id, ste_rssidref, port);
230 /* check whether on subj->ssid, obj->ssid share a common type*/
231 if (!have_common_type(ste_ssidref, ste_rssidref)) {
232 printkd("%s: Policy violation in event channel domain %x -> domain %x.\n",
233 __func__, d->domain_id, rdomid);
234 goto out;
235 }
236 }
237 /* b) check for grant table conflicts on shared pages */
238 spin_lock(&d->grant_table->lock);
239 for ( i = 0; i < nr_grant_entries(d->grant_table); i++ ) {
240 #define SPP (PAGE_SIZE / sizeof(struct grant_entry))
241 sha_copy = d->grant_table->shared[i/SPP][i%SPP];
242 if ( sha_copy.flags ) {
243 printkd("%s: grant dom (%hu) SHARED (%d) flags:(%hx) dom:(%hu) frame:(%lx)\n",
244 __func__, d->domain_id, i, sha_copy.flags, sha_copy.domid,
245 (unsigned long)sha_copy.frame);
246 rdomid = sha_copy.domid;
247 if ((rdom = get_domain_by_id(rdomid)) == NULL) {
248 spin_unlock(&d->grant_table->lock);
249 printkd("%s: domain not found ERROR!\n", __func__);
250 goto out;
251 };
252 /* rdom now has remote domain */
253 ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
254 (struct acm_ssid_domain *)(rdom->ssid));
255 ste_rssidref = ste_rssid->ste_ssidref;
256 put_domain(rdom);
257 if (!have_common_type(ste_ssidref, ste_rssidref)) {
258 spin_unlock(&d->grant_table->lock);
259 printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
260 __func__, d->domain_id, rdomid);
261 goto out;
262 }
263 }
264 }
265 spin_unlock(&d->grant_table->lock);
266 }
267 violation = 0;
268 out:
269 rcu_read_unlock(&domlist_read_lock);
270 return violation;
271 /* returning "violation != 0" means that existing sharing between domains would not
272 * have been allowed if the new policy had been enforced before the sharing; for ste,
273 * this means that there are at least 2 domains that have established sharing through
274 * event-channels or grant-tables but these two domains don't have no longer a common
275 * type in their typesets referenced by their ssidrefs */
276 }
278 /* set new policy; policy write-locked already */
279 static int
280 ste_set_policy(u8 *buf, u32 buf_size)
281 {
282 struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
283 void *ssidrefsbuf;
284 struct ste_ssid *ste_ssid;
285 struct domain *d;
286 int i;
288 if (buf_size < sizeof(struct acm_ste_policy_buffer))
289 return -EINVAL;
291 /* Convert endianess of policy */
292 ste_buf->policy_code = be32_to_cpu(ste_buf->policy_code);
293 ste_buf->policy_version = be32_to_cpu(ste_buf->policy_version);
294 ste_buf->ste_max_types = be32_to_cpu(ste_buf->ste_max_types);
295 ste_buf->ste_max_ssidrefs = be32_to_cpu(ste_buf->ste_max_ssidrefs);
296 ste_buf->ste_ssid_offset = be32_to_cpu(ste_buf->ste_ssid_offset);
298 /* policy type and version checks */
299 if ((ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
300 (ste_buf->policy_version != ACM_STE_VERSION))
301 return -EINVAL;
303 /* 1. create and copy-in new ssidrefs buffer */
304 ssidrefsbuf = xmalloc_array(u8, sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
305 if (ssidrefsbuf == NULL) {
306 return -ENOMEM;
307 }
308 if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
309 goto error_free;
311 arrcpy(ssidrefsbuf,
312 buf + ste_buf->ste_ssid_offset,
313 sizeof(domaintype_t),
314 ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
316 /* 2. now re-calculate sharing decisions based on running domains;
317 * this can fail if new policy is conflicting with sharing of running domains
318 * now: reject violating new policy; future: adjust sharing through revoking sharing */
319 if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
320 printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
321 goto error_free; /* new policy conflicts with sharing of running domains */
322 }
323 /* 3. replace old policy (activate new policy) */
324 ste_bin_pol.max_types = ste_buf->ste_max_types;
325 ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
326 xfree(ste_bin_pol.ssidrefs);
327 ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;
329 /* clear all ste caches */
330 rcu_read_lock(&domlist_read_lock);
331 for_each_domain ( d ) {
332 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
333 (struct acm_ssid_domain *)(d)->ssid);
334 for (i=0; i<ACM_TE_CACHE_SIZE; i++)
335 ste_ssid->ste_cache[i].valid = ACM_STE_free;
336 }
337 rcu_read_unlock(&domlist_read_lock);
338 return ACM_OK;
340 error_free:
341 printk("%s: ERROR setting policy.\n", __func__);
342 xfree(ssidrefsbuf);
343 return -EFAULT;
344 }
346 static int
347 ste_dump_stats(u8 *buf, u16 buf_len)
348 {
349 struct acm_ste_stats_buffer stats;
351 /* now send the hook counts to user space */
352 stats.ec_eval_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_eval_count));
353 stats.gt_eval_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_eval_count));
354 stats.ec_denied_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_denied_count));
355 stats.gt_denied_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_denied_count));
356 stats.ec_cachehit_count = cpu_to_be32(atomic_read(&ste_bin_pol.ec_cachehit_count));
357 stats.gt_cachehit_count = cpu_to_be32(atomic_read(&ste_bin_pol.gt_cachehit_count));
359 if (buf_len < sizeof(struct acm_ste_stats_buffer))
360 return -ENOMEM;
362 memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
363 return sizeof(struct acm_ste_stats_buffer);
364 }
366 static int
367 ste_dump_ssid_types(ssidref_t ssidref, u8 *buf, u16 len)
368 {
369 int i;
371 /* fill in buffer */
372 if (ste_bin_pol.max_types > len)
373 return -EFAULT;
375 if (ssidref >= ste_bin_pol.max_ssidrefs)
376 return -EFAULT;
378 /* read types for chwall ssidref */
379 for(i=0; i< ste_bin_pol.max_types; i++) {
380 if (ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i])
381 buf[i] = 1;
382 else
383 buf[i] = 0;
384 }
385 return ste_bin_pol.max_types;
386 }
388 /* we need to go through this before calling the hooks,
389 * returns 1 == cache hit */
390 static int inline
391 check_cache(struct domain *dom, domid_t rdom) {
392 struct ste_ssid *ste_ssid;
393 int i;
395 printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);
397 if (dom->ssid == NULL)
398 return 0;
399 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
400 (struct acm_ssid_domain *)(dom->ssid));
402 for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
403 if ((ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
404 (ste_ssid->ste_cache[i].id == rdom)) {
405 printkd("cache hit (entry %x, id= %x!\n", i, ste_ssid->ste_cache[i].id);
406 return 1;
407 }
408 }
409 return 0;
410 }
413 /* we only get here if there is NO entry yet; no duplication check! */
414 static void inline
415 cache_result(struct domain *subj, struct domain *obj) {
416 struct ste_ssid *ste_ssid;
417 int i;
418 printkd("caching from doms: %x --> %x.\n", subj->domain_id, obj->domain_id);
419 if (subj->ssid == NULL)
420 return;
421 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
422 (struct acm_ssid_domain *)(subj)->ssid);
423 for(i=0; i< ACM_TE_CACHE_SIZE; i++)
424 if (ste_ssid->ste_cache[i].valid == ACM_STE_free)
425 break;
426 if (i< ACM_TE_CACHE_SIZE) {
427 ste_ssid->ste_cache[i].valid = ACM_STE_valid;
428 ste_ssid->ste_cache[i].id = obj->domain_id;
429 } else
430 printk ("Cache of dom %x is full!\n", subj->domain_id);
431 }
433 /* deletes entries for domain 'id' from all caches (re-use) */
434 static void inline
435 clean_id_from_cache(domid_t id)
436 {
437 struct ste_ssid *ste_ssid;
438 int i;
439 struct domain *d;
440 struct acm_ssid_domain *ssid;
442 printkd("deleting cache for dom %x.\n", id);
443 rcu_read_lock(&domlist_read_lock);
444 /* look through caches of all domains */
445 for_each_domain ( d ) {
446 ssid = (struct acm_ssid_domain *)(d->ssid);
448 if (ssid == NULL)
449 continue; /* hanging domain structure, no ssid any more ... */
450 ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssid);
451 if (!ste_ssid) {
452 printk("%s: deleting ID from cache ERROR (no ste_ssid)!\n",
453 __func__);
454 goto out;
455 }
456 for (i=0; i<ACM_TE_CACHE_SIZE; i++)
457 if ((ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
458 (ste_ssid->ste_cache[i].id == id))
459 ste_ssid->ste_cache[i].valid = ACM_STE_free;
460 }
461 out:
462 rcu_read_unlock(&domlist_read_lock);
463 }
465 /***************************
466 * Authorization functions
467 **************************/
468 static int
469 ste_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
470 {
471 /* check for ssidref in range for policy */
472 ssidref_t ste_ssidref;
473 traceprintk("%s.\n", __func__);
475 read_lock(&acm_bin_pol_rwlock);
476 ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
477 if (ste_ssidref == ACM_DEFAULT_LOCAL_SSID) {
478 printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", __func__);
479 read_unlock(&acm_bin_pol_rwlock);
480 return ACM_ACCESS_DENIED; /* catching and indicating config error */
481 }
482 if (ste_ssidref >= ste_bin_pol.max_ssidrefs) {
483 printk("%s: ERROR ste_ssidref > max(%x).\n",
484 __func__, ste_bin_pol.max_ssidrefs-1);
485 read_unlock(&acm_bin_pol_rwlock);
486 return ACM_ACCESS_DENIED;
487 }
488 read_unlock(&acm_bin_pol_rwlock);
489 return ACM_ACCESS_PERMITTED;
490 }
492 static void
493 ste_post_domain_destroy(void *subject_ssid, domid_t id)
494 {
495 /* clean all cache entries for destroyed domain (might be re-used) */
496 clean_id_from_cache(id);
497 }
499 /* -------- EVENTCHANNEL OPERATIONS -----------*/
500 static int
501 ste_pre_eventchannel_unbound(domid_t id1, domid_t id2) {
502 struct domain *subj, *obj;
503 int ret;
504 traceprintk("%s: dom%x-->dom%x.\n", __func__,
505 (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
506 (id2 == DOMID_SELF) ? current->domain->domain_id : id2);
508 if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
509 if (id2 == DOMID_SELF) id2 = current->domain->domain_id;
511 subj = get_domain_by_id(id1);
512 obj = get_domain_by_id(id2);
513 if ((subj == NULL) || (obj == NULL)) {
514 ret = ACM_ACCESS_DENIED;
515 goto out;
516 }
517 /* cache check late */
518 if (check_cache(subj, obj->domain_id)) {
519 atomic_inc(&ste_bin_pol.ec_cachehit_count);
520 ret = ACM_ACCESS_PERMITTED;
521 goto out;
522 }
523 atomic_inc(&ste_bin_pol.ec_eval_count);
525 if (share_common_type(subj, obj)) {
526 cache_result(subj, obj);
527 ret = ACM_ACCESS_PERMITTED;
528 } else {
529 atomic_inc(&ste_bin_pol.ec_denied_count);
530 ret = ACM_ACCESS_DENIED;
531 }
532 out:
533 if (obj != NULL)
534 put_domain(obj);
535 if (subj != NULL)
536 put_domain(subj);
537 return ret;
538 }
540 static int
541 ste_pre_eventchannel_interdomain(domid_t id)
542 {
543 struct domain *subj=NULL, *obj=NULL;
544 int ret;
546 traceprintk("%s: dom%x-->dom%x.\n", __func__,
547 current->domain->domain_id,
548 (id == DOMID_SELF) ? current->domain->domain_id : id);
550 /* following is a bit longer but ensures that we
551 * "put" only domains that we where "find"-ing
552 */
553 if (id == DOMID_SELF) id = current->domain->domain_id;
555 subj = current->domain;
556 obj = get_domain_by_id(id);
557 if (obj == NULL) {
558 ret = ACM_ACCESS_DENIED;
559 goto out;
560 }
562 /* cache check late, but evtchn is not on performance critical path */
563 if (check_cache(subj, obj->domain_id)) {
564 atomic_inc(&ste_bin_pol.ec_cachehit_count);
565 ret = ACM_ACCESS_PERMITTED;
566 goto out;
567 }
569 atomic_inc(&ste_bin_pol.ec_eval_count);
571 if (share_common_type(subj, obj)) {
572 cache_result(subj, obj);
573 ret = ACM_ACCESS_PERMITTED;
574 } else {
575 atomic_inc(&ste_bin_pol.ec_denied_count);
576 ret = ACM_ACCESS_DENIED;
577 }
578 out:
579 if (obj != NULL)
580 put_domain(obj);
581 return ret;
582 }
584 /* -------- SHARED MEMORY OPERATIONS -----------*/
586 static int
587 ste_pre_grant_map_ref (domid_t id) {
588 struct domain *obj, *subj;
589 int ret;
590 traceprintk("%s: dom%x-->dom%x.\n", __func__,
591 current->domain->domain_id, id);
593 if (check_cache(current->domain, id)) {
594 atomic_inc(&ste_bin_pol.gt_cachehit_count);
595 return ACM_ACCESS_PERMITTED;
596 }
597 atomic_inc(&ste_bin_pol.gt_eval_count);
598 subj = current->domain;
599 obj = get_domain_by_id(id);
601 if (share_common_type(subj, obj)) {
602 cache_result(subj, obj);
603 ret = ACM_ACCESS_PERMITTED;
604 } else {
605 atomic_inc(&ste_bin_pol.gt_denied_count);
606 printkd("%s: ACCESS DENIED!\n", __func__);
607 ret = ACM_ACCESS_DENIED;
608 }
609 if (obj != NULL)
610 put_domain(obj);
611 return ret;
612 }
615 /* since setting up grant tables involves some implicit information
616 flow from the creating domain to the domain that is setup, we
617 check types in addition to the general authorization */
618 static int
619 ste_pre_grant_setup (domid_t id) {
620 struct domain *obj, *subj;
621 int ret;
622 traceprintk("%s: dom%x-->dom%x.\n", __func__,
623 current->domain->domain_id, id);
625 if (check_cache(current->domain, id)) {
626 atomic_inc(&ste_bin_pol.gt_cachehit_count);
627 return ACM_ACCESS_PERMITTED;
628 }
629 atomic_inc(&ste_bin_pol.gt_eval_count);
630 /* a) check authorization (eventually use specific capabilities) */
631 if (!IS_PRIV(current->domain)) {
632 printk("%s: Grant table management authorization denied ERROR!\n", __func__);
633 return ACM_ACCESS_DENIED;
634 }
635 /* b) check types */
636 subj = current->domain;
637 obj = get_domain_by_id(id);
639 if (share_common_type(subj, obj)) {
640 cache_result(subj, obj);
641 ret = ACM_ACCESS_PERMITTED;
642 } else {
643 atomic_inc(&ste_bin_pol.gt_denied_count);
644 ret = ACM_ACCESS_DENIED;
645 }
646 if (obj != NULL)
647 put_domain(obj);
648 return ret;
649 }
651 /* -------- DOMAIN-Requested Decision hooks -----------*/
653 static int
654 ste_sharing(ssidref_t ssidref1, ssidref_t ssidref2) {
655 if (have_common_type (
656 GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref1),
657 GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref2)
658 ))
659 return ACM_ACCESS_PERMITTED;
660 else
661 return ACM_ACCESS_DENIED;
662 }
665 /* now define the hook structure similarly to LSM */
666 struct acm_operations acm_simple_type_enforcement_ops = {
668 /* policy management services */
669 .init_domain_ssid = ste_init_domain_ssid,
670 .free_domain_ssid = ste_free_domain_ssid,
671 .dump_binary_policy = ste_dump_policy,
672 .set_binary_policy = ste_set_policy,
673 .dump_statistics = ste_dump_stats,
674 .dump_ssid_types = ste_dump_ssid_types,
676 /* domain management control hooks */
677 .pre_domain_create = ste_pre_domain_create,
678 .post_domain_create = NULL,
679 .fail_domain_create = NULL,
680 .post_domain_destroy = ste_post_domain_destroy,
682 /* event channel control hooks */
683 .pre_eventchannel_unbound = ste_pre_eventchannel_unbound,
684 .fail_eventchannel_unbound = NULL,
685 .pre_eventchannel_interdomain = ste_pre_eventchannel_interdomain,
686 .fail_eventchannel_interdomain = NULL,
688 /* grant table control hooks */
689 .pre_grant_map_ref = ste_pre_grant_map_ref,
690 .fail_grant_map_ref = NULL,
691 .pre_grant_setup = ste_pre_grant_setup,
692 .fail_grant_setup = NULL,
693 .sharing = ste_sharing,
694 };
696 /*
697 * Local variables:
698 * mode: C
699 * c-set-style: "BSD"
700 * c-basic-offset: 4
701 * tab-width: 4
702 * indent-tabs-mode: nil
703 * End:
704 */