ia64/xen-unstable

view xen/acm/acm_simple_type_enforcement_hooks.c @ 11901:d088aec406c0

[ACM] Add missing alignment operations for the ACM module.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
author kfraser@localhost.localdomain
date Thu Oct 19 15:14:25 2006 +0100 (2006-10-19)
parents 58a3a7849216
children 271ffb1c12eb
/****************************************************************
 * acm_simple_type_enforcement_hooks.c
 *
 * Copyright (C) 2005 IBM Corporation
 *
 * Author:
 * Reiner Sailer <sailer@watson.ibm.com>
 *
 * Contributors:
 * Stefan Berger <stefanb@watson.ibm.com>
 *   support for network-order binary policies
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * sHype Simple Type Enforcement for Xen
 * STE allows control over which domains can set up sharing
 * (event channels right now) with which other domains. Hooks
 * are defined and called throughout Xen when domains bind to
 * shared resources (set up event channels); a domain is allowed
 * to set up sharing with another domain if and only if both domains
 * share at least one common type. (See the illustrative sketch
 * after have_common_type() below.)
 *
 */
#include <xen/lib.h>
#include <asm/types.h>
#include <asm/current.h>
#include <acm/acm_hooks.h>
#include <asm/atomic.h>
#include <acm/acm_endian.h>

/* local cache structures for STE policy */
struct ste_binary_policy ste_bin_pol;

static inline int have_common_type (ssidref_t ref1, ssidref_t ref2) {
    int i;
    for(i=0; i< ste_bin_pol.max_types; i++)
        if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] &&
             ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
            printkd("%s: common type #%02x.\n", __func__, i);
            return 1;
        }
    return 0;
}
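
/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * the array below is hypothetical): ste_bin_pol.ssidrefs is a flat
 * max_ssidrefs x max_types matrix, where entry [r * max_types + t] is
 * non-zero iff ssidref r carries type t.  If the policy held the matrix
 * below (max_ssidrefs == 4, max_types == 2), ssidrefs 1 and 2 would share
 * type 0, so have_common_type(1, 2) would return 1, while ssidrefs 1 and 3
 * share no type and have_common_type(1, 3) would return 0.
 */
#if 0   /* example only, never compiled */
static const domaintype_t example_ssidrefs[4 * 2] = {
    /* type 0, type 1 */
       0,      0,       /* ssidref 0: ACM_DEFAULT_LOCAL_SSID, carries nothing */
       1,      0,       /* ssidref 1: type 0 only (e.g. dom0)                 */
       1,      1,       /* ssidref 2: types 0 and 1 -> shares type 0 with 1   */
       0,      1,       /* ssidref 3: type 1 only  -> no common type with 1   */
};
#endif
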
/* Helper function: returns non-zero iff subj and obj share a common type */
static int share_common_type(struct domain *subj, struct domain *obj)
{
    ssidref_t ref_s, ref_o;
    int ret;

    if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
        return 0;
    read_lock(&acm_bin_pol_rwlock);
    /* lookup the policy-local ssids */
    ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
             (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
    ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
             (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
    /* check whether subj and obj share a common ste type */
    ret = have_common_type(ref_s, ref_o);
    read_unlock(&acm_bin_pol_rwlock);
    return ret;
}

/*
 * Initializing STE policy (will be filled by the policy partition
 * using the setpolicy command)
 */
int acm_init_ste_policy(void)
{
    /* minimal startup policy; policy write-locked already */
    ste_bin_pol.max_types = 1;
    ste_bin_pol.max_ssidrefs = 2;
    ste_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, 2);

    if (ste_bin_pol.ssidrefs == NULL)
        return ACM_INIT_SSID_ERROR;

    memset(ste_bin_pol.ssidrefs, 0, 2 * sizeof(domaintype_t));

    /* initialize state so that dom0 can start up and communicate with itself */
    ste_bin_pol.ssidrefs[1] = 1;

    /* init stats */
    atomic_set(&(ste_bin_pol.ec_eval_count), 0);
    atomic_set(&(ste_bin_pol.ec_denied_count), 0);
    atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
    atomic_set(&(ste_bin_pol.gt_eval_count), 0);
    atomic_set(&(ste_bin_pol.gt_denied_count), 0);
    atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
    return ACM_OK;
}
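
/*
 * Editor's sketch (not part of the original file, never compiled): the
 * startup policy set up above is equivalent to the 2 x 1 matrix below --
 * ssidref 0 carries no type, ssidref 1 (used by dom0) carries type 0, so
 * until a real policy is loaded only dom0 shares a common type, namely
 * with itself.
 */
#if 0
static const domaintype_t example_startup_ssidrefs[2 * 1] = { 0, 1 };
#endif
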
/* ste initialization function hooks */
static int
ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
{
    int i;
    struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);
    traceprintk("%s.\n", __func__);

    if (ste_ssidp == NULL)
        return ACM_INIT_SSID_ERROR;

    /* get policy-local ssid reference */
    ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
    if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
        (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
        printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
                __func__, ste_ssidp->ste_ssidref);
        xfree(ste_ssidp);
        return ACM_INIT_SSID_ERROR;
    }
    /* clean ste cache */
    for (i=0; i<ACM_TE_CACHE_SIZE; i++)
        ste_ssidp->ste_cache[i].valid = ACM_STE_free;

    (*ste_ssid) = ste_ssidp;
    printkd("%s: determined ste_ssidref to %x.\n",
            __func__, ste_ssidp->ste_ssidref);
    return ACM_OK;
}

static void
ste_free_domain_ssid(void *ste_ssid)
{
    traceprintk("%s.\n", __func__);
    xfree(ste_ssid);
    return;
}

/* dump type enforcement cache; policy read-locked already */
static int
ste_dump_policy(u8 *buf, u32 buf_size) {
    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
    int ret = 0;

    if (buf_size < sizeof(struct acm_ste_policy_buffer))
        return -EINVAL;

    ste_buf->ste_max_types = htonl(ste_bin_pol.max_types);
    ste_buf->ste_max_ssidrefs = htonl(ste_bin_pol.max_ssidrefs);
    ste_buf->policy_code = htonl(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
    ste_buf->ste_ssid_offset = htonl(sizeof(struct acm_ste_policy_buffer));
    ret = ntohl(ste_buf->ste_ssid_offset) +
        sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;

    /* round the returned length up to an 8-byte boundary */
    ret = (ret + 7) & ~7;

    if (buf_size < ret)
        return -EINVAL;

    /* now copy buffer over */
    arrcpy(buf + ntohl(ste_buf->ste_ssid_offset),
           ste_bin_pol.ssidrefs,
           sizeof(domaintype_t),
           ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);

    return ret;
}
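
/*
 * Editor's sketch (not part of the original file, never compiled): the dumped
 * buffer is the policy header followed, at ste_ssid_offset, by the ssidrefs
 * matrix, with counts in network byte order; the length returned above is
 * rounded up to the next 8-byte boundary via (ret + 7) & ~7 -- presumably one
 * of the alignment operations this changeset adds.  The helper name below is
 * hypothetical and only mirrors that size computation.
 */
#if 0
static u32 example_ste_dump_len(u32 max_ssidrefs, u32 max_types)
{
    u32 len = sizeof(struct acm_ste_policy_buffer) +
              sizeof(domaintype_t) * max_ssidrefs * max_types;
    return (len + 7) & ~7;   /* align length to an 8-byte boundary */
}
#endif
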
/*
 * ste_init_state is called when a policy is changed, to detect violations
 * (return != 0). From a security point of view, we simulate that all running
 * domains are re-started and all sharing decisions are replayed to detect
 * violations in the current sharing behavior
 * (right now: event channels; future: also grant tables).
 */
static int
ste_init_state(struct acm_ste_policy_buffer *ste_buf, domaintype_t *ssidrefs)
{
    int violation = 1;
    struct ste_ssid *ste_ssid, *ste_rssid;
    ssidref_t ste_ssidref, ste_rssidref;
    struct domain **pd, *rdom;
    domid_t rdomid;
    grant_entry_t sha_copy;
    int port, i;

    read_lock(&domlist_lock); /* go by domain? or directly by global? event/grant list */
    /* go through all domains and adjust policy as if this domain was started now */
    pd = &domain_list;
    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                             (struct acm_ssid_domain *)(*pd)->ssid);
        ste_ssidref = ste_ssid->ste_ssidref;
        traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
                    __func__, (*pd)->domain_id, ste_ssidref);
        /* a) check for event channel conflicts */
        for (port=0; port < NR_EVTCHN_BUCKETS; port++) {
            spin_lock(&(*pd)->evtchn_lock);
            if ((*pd)->evtchn[port] == NULL) {
                spin_unlock(&(*pd)->evtchn_lock);
                continue;
            }
            if ((*pd)->evtchn[port]->state == ECS_INTERDOMAIN) {
                rdom = (*pd)->evtchn[port]->u.interdomain.remote_dom;
                rdomid = rdom->domain_id;
                /* rdom now has the remote domain */
                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                      (struct acm_ssid_domain *)(rdom->ssid));
                ste_rssidref = ste_rssid->ste_ssidref;
            } else if ((*pd)->evtchn[port]->state == ECS_UNBOUND) {
                rdomid = (*pd)->evtchn[port]->u.unbound.remote_domid;
                if ((rdom = find_domain_by_id(rdomid)) == NULL) {
                    printk("%s: Error finding domain to id %x!\n", __func__, rdomid);
                    goto out;
                }
                /* rdom now has the remote domain */
                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                      (struct acm_ssid_domain *)(rdom->ssid));
                ste_rssidref = ste_rssid->ste_ssidref;
                put_domain(rdom);
            } else {
                spin_unlock(&(*pd)->evtchn_lock);
                continue; /* port unused */
            }
            spin_unlock(&(*pd)->evtchn_lock);

            /* rdom now has the remote domain */
            ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                  (struct acm_ssid_domain *)(rdom->ssid));
            ste_rssidref = ste_rssid->ste_ssidref;
            traceprintk("%s: eventch: domain %x (ssidref %x) --> domain %x (rssidref %x) used (port %x).\n",
                        __func__, (*pd)->domain_id, ste_ssidref, rdom->domain_id, ste_rssidref, port);
            /* check whether subj->ssid and obj->ssid share a common type */
            if (!have_common_type(ste_ssidref, ste_rssidref)) {
                printkd("%s: Policy violation in event channel domain %x -> domain %x.\n",
                        __func__, (*pd)->domain_id, rdomid);
                goto out;
            }
        }
        /* b) check for grant table conflicts on shared pages */
        if ((*pd)->grant_table->shared == NULL) {
            printkd("%s: Grant ... sharing for domain %x not setup!\n", __func__, (*pd)->domain_id);
            continue;
        }
        for ( i = 0; i < NR_GRANT_ENTRIES; i++ ) {
            sha_copy = (*pd)->grant_table->shared[i];
            if ( sha_copy.flags ) {
                printkd("%s: grant dom (%hu) SHARED (%d) flags:(%hx) dom:(%hu) frame:(%lx)\n",
                        __func__, (*pd)->domain_id, i, sha_copy.flags, sha_copy.domid,
                        (unsigned long)sha_copy.frame);
                rdomid = sha_copy.domid;
                if ((rdom = find_domain_by_id(rdomid)) == NULL) {
                    printkd("%s: domain not found ERROR!\n", __func__);
                    goto out;
                }
                /* rdom now has the remote domain */
                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                      (struct acm_ssid_domain *)(rdom->ssid));
                ste_rssidref = ste_rssid->ste_ssidref;
                put_domain(rdom);
                if (!have_common_type(ste_ssidref, ste_rssidref)) {
                    printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
                            __func__, (*pd)->domain_id, rdomid);
                    goto out;
                }
            }
        }
    }
    violation = 0;
 out:
    read_unlock(&domlist_lock);
    return violation;
    /*
     * Returning "violation != 0" means that existing sharing between domains
     * would not have been allowed if the new policy had been enforced before
     * the sharing was set up; for STE, this means that at least two domains
     * have established sharing through event channels or grant tables but no
     * longer have a common type in the typesets referenced by their ssidrefs.
     */
}

/* set new policy; policy write-locked already */
static int
ste_set_policy(u8 *buf, u32 buf_size)
{
    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
    void *ssidrefsbuf;
    struct ste_ssid *ste_ssid;
    struct domain **pd;
    int i;

    if (buf_size < sizeof(struct acm_ste_policy_buffer))
        return -EINVAL;

    /* convert endianness of policy */
    ste_buf->policy_code = ntohl(ste_buf->policy_code);
    ste_buf->policy_version = ntohl(ste_buf->policy_version);
    ste_buf->ste_max_types = ntohl(ste_buf->ste_max_types);
    ste_buf->ste_max_ssidrefs = ntohl(ste_buf->ste_max_ssidrefs);
    ste_buf->ste_ssid_offset = ntohl(ste_buf->ste_ssid_offset);

    /* policy type and version checks */
    if ((ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
        (ste_buf->policy_version != ACM_STE_VERSION))
        return -EINVAL;

    /* 1. create and copy-in new ssidrefs buffer */
    ssidrefsbuf = xmalloc_array(u8, sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
    if (ssidrefsbuf == NULL) {
        return -ENOMEM;
    }
    if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
        goto error_free;

    arrcpy(ssidrefsbuf,
           buf + ste_buf->ste_ssid_offset,
           sizeof(domaintype_t),
           ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);

    /*
     * 2. now re-calculate sharing decisions based on running domains;
     *    this can fail if the new policy conflicts with the sharing of running domains
     *    (now: reject the violating new policy; future: adjust sharing by revoking it)
     */
    if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
        printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
        goto error_free; /* new policy conflicts with sharing of running domains */
    }
    /* 3. replace old policy (activate new policy) */
    ste_bin_pol.max_types = ste_buf->ste_max_types;
    ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
    xfree(ste_bin_pol.ssidrefs);
    ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;

    /* clear all ste caches */
    read_lock(&domlist_lock);
    pd = &domain_list;
    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                             (struct acm_ssid_domain *)(*pd)->ssid);
        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
            ste_ssid->ste_cache[i].valid = ACM_STE_free;
    }
    read_unlock(&domlist_lock);
    return ACM_OK;

 error_free:
    printk("%s: ERROR setting policy.\n", __func__);
    xfree(ssidrefsbuf);
    return -EFAULT;
}
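
/*
 * Editor's sketch (not part of the original file, never compiled): how a
 * caller would have to lay out the buffer passed to ste_set_policy() -- all
 * header fields in network byte order, the ssidrefs matrix appended at
 * ste_ssid_offset.  Structure and field names are the ones used above; the
 * helper name is hypothetical, and the caller must supply a buffer large
 * enough for the header plus the matrix.
 */
#if 0
static void example_fill_ste_policy(struct acm_ste_policy_buffer *b,
                                    const domaintype_t *matrix,
                                    u32 max_ssidrefs, u32 max_types)
{
    b->policy_code      = htonl(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
    b->policy_version   = htonl(ACM_STE_VERSION);
    b->ste_max_types    = htonl(max_types);
    b->ste_max_ssidrefs = htonl(max_ssidrefs);
    b->ste_ssid_offset  = htonl(sizeof(struct acm_ste_policy_buffer));
    memcpy((u8 *)b + sizeof(struct acm_ste_policy_buffer), matrix,
           sizeof(domaintype_t) * max_ssidrefs * max_types);
}
#endif
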
static int
ste_dump_stats(u8 *buf, u16 buf_len)
{
    struct acm_ste_stats_buffer stats;

    /* now send the hook counts to user space */
    stats.ec_eval_count = htonl(atomic_read(&ste_bin_pol.ec_eval_count));
    stats.gt_eval_count = htonl(atomic_read(&ste_bin_pol.gt_eval_count));
    stats.ec_denied_count = htonl(atomic_read(&ste_bin_pol.ec_denied_count));
    stats.gt_denied_count = htonl(atomic_read(&ste_bin_pol.gt_denied_count));
    stats.ec_cachehit_count = htonl(atomic_read(&ste_bin_pol.ec_cachehit_count));
    stats.gt_cachehit_count = htonl(atomic_read(&ste_bin_pol.gt_cachehit_count));

    if (buf_len < sizeof(struct acm_ste_stats_buffer))
        return -ENOMEM;

    memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
    return sizeof(struct acm_ste_stats_buffer);
}

static int
ste_dump_ssid_types(ssidref_t ssidref, u8 *buf, u16 len)
{
    int i;

    /* fill in buffer */
    if (ste_bin_pol.max_types > len)
        return -EFAULT;

    if (ssidref >= ste_bin_pol.max_ssidrefs)
        return -EFAULT;

    /* read types for the STE ssidref */
    for(i=0; i< ste_bin_pol.max_types; i++) {
        if (ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i])
            buf[i] = 1;
        else
            buf[i] = 0;
    }
    return ste_bin_pol.max_types;
}

/*
 * We need to go through this before calling the hooks;
 * returns 1 == cache hit.
 */
static inline int
check_cache(struct domain *dom, domid_t rdom) {
    struct ste_ssid *ste_ssid;
    int i;

    printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);

    if (dom->ssid == NULL)
        return 0;
    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                         (struct acm_ssid_domain *)(dom->ssid));

    for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
        if ((ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
            (ste_ssid->ste_cache[i].id == rdom)) {
            printkd("cache hit (entry %x, id= %x)!\n", i, ste_ssid->ste_cache[i].id);
            return 1;
        }
    }
    return 0;
}

/* we only get here if there is NO entry yet; no duplication check! */
static inline void
cache_result(struct domain *subj, struct domain *obj) {
    struct ste_ssid *ste_ssid;
    int i;
    printkd("caching from doms: %x --> %x.\n", subj->domain_id, obj->domain_id);
    if (subj->ssid == NULL)
        return;
    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                         (struct acm_ssid_domain *)(subj)->ssid);
    for(i=0; i< ACM_TE_CACHE_SIZE; i++)
        if (ste_ssid->ste_cache[i].valid == ACM_STE_free)
            break;
    if (i< ACM_TE_CACHE_SIZE) {
        ste_ssid->ste_cache[i].valid = ACM_STE_valid;
        ste_ssid->ste_cache[i].id = obj->domain_id;
    } else
        printk ("Cache of dom %x is full!\n", subj->domain_id);
}

/* deletes entries for domain 'id' from all caches (re-use) */
static inline void
clean_id_from_cache(domid_t id)
{
    struct ste_ssid *ste_ssid;
    int i;
    struct domain **pd;
    struct acm_ssid_domain *ssid;

    printkd("deleting cache for dom %x.\n", id);
    read_lock(&domlist_lock); /* look through caches of all domains */
    pd = &domain_list;
    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
        ssid = (struct acm_ssid_domain *)((*pd)->ssid);

        if (ssid == NULL)
            continue; /* hanging domain structure, no ssid any more ... */
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssid);
        if (!ste_ssid) {
            printk("%s: deleting ID from cache ERROR (no ste_ssid)!\n",
                   __func__);
            goto out;
        }
        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
            if ((ste_ssid->ste_cache[i].valid == ACM_STE_valid) &&
                (ste_ssid->ste_cache[i].id == id))
                ste_ssid->ste_cache[i].valid = ACM_STE_free;
    }
 out:
    read_unlock(&domlist_lock);
}

/***************************
 * Authorization functions
 **************************/
static int
ste_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
{
    /* check for ssidref in range for policy */
    ssidref_t ste_ssidref;
    traceprintk("%s.\n", __func__);

    read_lock(&acm_bin_pol_rwlock);
    ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
    if (ste_ssidref == ACM_DEFAULT_LOCAL_SSID) {
        printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", __func__);
        read_unlock(&acm_bin_pol_rwlock);
        return ACM_ACCESS_DENIED; /* catching and indicating config error */
    }
    if (ste_ssidref >= ste_bin_pol.max_ssidrefs) {
        printk("%s: ERROR ste_ssidref > max(%x).\n",
               __func__, ste_bin_pol.max_ssidrefs-1);
        read_unlock(&acm_bin_pol_rwlock);
        return ACM_ACCESS_DENIED;
    }
    read_unlock(&acm_bin_pol_rwlock);
    return ACM_ACCESS_PERMITTED;
}

static void
ste_post_domain_destroy(void *subject_ssid, domid_t id)
{
    /* clean all cache entries for destroyed domain (might be re-used) */
    clean_id_from_cache(id);
}

/* -------- EVENTCHANNEL OPERATIONS ----------- */
static int
ste_pre_eventchannel_unbound(domid_t id1, domid_t id2) {
    struct domain *subj, *obj;
    int ret;
    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
                (id2 == DOMID_SELF) ? current->domain->domain_id : id2);

    if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
    if (id2 == DOMID_SELF) id2 = current->domain->domain_id;

    subj = find_domain_by_id(id1);
    obj = find_domain_by_id(id2);
    if ((subj == NULL) || (obj == NULL)) {
        ret = ACM_ACCESS_DENIED;
        goto out;
    }
    /* cache check late */
    if (check_cache(subj, obj->domain_id)) {
        atomic_inc(&ste_bin_pol.ec_cachehit_count);
        ret = ACM_ACCESS_PERMITTED;
        goto out;
    }
    atomic_inc(&ste_bin_pol.ec_eval_count);

    if (share_common_type(subj, obj)) {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    } else {
        atomic_inc(&ste_bin_pol.ec_denied_count);
        ret = ACM_ACCESS_DENIED;
    }
 out:
    if (obj != NULL)
        put_domain(obj);
    if (subj != NULL)
        put_domain(subj);
    return ret;
}

static int
ste_pre_eventchannel_interdomain(domid_t id)
{
    struct domain *subj=NULL, *obj=NULL;
    int ret;

    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                current->domain->domain_id,
                (id == DOMID_SELF) ? current->domain->domain_id : id);

    /*
     * The following is a bit longer, but it ensures that we
     * "put" only domains that we were "find"-ing.
     */
    if (id == DOMID_SELF) id = current->domain->domain_id;

    subj = current->domain;
    obj = find_domain_by_id(id);
    if (obj == NULL) {
        ret = ACM_ACCESS_DENIED;
        goto out;
    }

    /* cache check late, but evtchn is not on the performance-critical path */
    if (check_cache(subj, obj->domain_id)) {
        atomic_inc(&ste_bin_pol.ec_cachehit_count);
        ret = ACM_ACCESS_PERMITTED;
        goto out;
    }

    atomic_inc(&ste_bin_pol.ec_eval_count);

    if (share_common_type(subj, obj)) {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    } else {
        atomic_inc(&ste_bin_pol.ec_denied_count);
        ret = ACM_ACCESS_DENIED;
    }
 out:
    if (obj != NULL)
        put_domain(obj);
    return ret;
}

/* -------- SHARED MEMORY OPERATIONS ----------- */

static int
ste_pre_grant_map_ref (domid_t id) {
    struct domain *obj, *subj;
    int ret;
    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                current->domain->domain_id, id);

    if (check_cache(current->domain, id)) {
        atomic_inc(&ste_bin_pol.gt_cachehit_count);
        return ACM_ACCESS_PERMITTED;
    }
    atomic_inc(&ste_bin_pol.gt_eval_count);
    subj = current->domain;
    obj = find_domain_by_id(id);

    if (share_common_type(subj, obj)) {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    } else {
        atomic_inc(&ste_bin_pol.gt_denied_count);
        printkd("%s: ACCESS DENIED!\n", __func__);
        ret = ACM_ACCESS_DENIED;
    }
    if (obj != NULL)
        put_domain(obj);
    return ret;
}

/*
 * Since setting up grant tables involves some implicit information
 * flow from the creating domain to the domain that is being set up, we
 * check types in addition to the general authorization.
 */
static int
ste_pre_grant_setup (domid_t id) {
    struct domain *obj, *subj;
    int ret;
    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                current->domain->domain_id, id);

    if (check_cache(current->domain, id)) {
        atomic_inc(&ste_bin_pol.gt_cachehit_count);
        return ACM_ACCESS_PERMITTED;
    }
    atomic_inc(&ste_bin_pol.gt_eval_count);
    /* a) check authorization (eventually use specific capabilities) */
    if (!IS_PRIV(current->domain)) {
        printk("%s: Grant table management authorization denied ERROR!\n", __func__);
        return ACM_ACCESS_DENIED;
    }
    /* b) check types */
    subj = current->domain;
    obj = find_domain_by_id(id);

    if (share_common_type(subj, obj)) {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    } else {
        atomic_inc(&ste_bin_pol.gt_denied_count);
        ret = ACM_ACCESS_DENIED;
    }
    if (obj != NULL)
        put_domain(obj);
    return ret;
}

/* -------- DOMAIN-Requested Decision hooks ----------- */

static int
ste_sharing(ssidref_t ssidref1, ssidref_t ssidref2) {
    if (have_common_type(
            GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref1),
            GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref2)
            ))
        return ACM_ACCESS_PERMITTED;
    else
        return ACM_ACCESS_DENIED;
}

/* now define the hook structure similarly to LSM */
struct acm_operations acm_simple_type_enforcement_ops = {

    /* policy management services */
    .init_domain_ssid = ste_init_domain_ssid,
    .free_domain_ssid = ste_free_domain_ssid,
    .dump_binary_policy = ste_dump_policy,
    .set_binary_policy = ste_set_policy,
    .dump_statistics = ste_dump_stats,
    .dump_ssid_types = ste_dump_ssid_types,

    /* domain management control hooks */
    .pre_domain_create = ste_pre_domain_create,
    .post_domain_create = NULL,
    .fail_domain_create = NULL,
    .post_domain_destroy = ste_post_domain_destroy,

    /* event channel control hooks */
    .pre_eventchannel_unbound = ste_pre_eventchannel_unbound,
    .fail_eventchannel_unbound = NULL,
    .pre_eventchannel_interdomain = ste_pre_eventchannel_interdomain,
    .fail_eventchannel_interdomain = NULL,

    /* grant table control hooks */
    .pre_grant_map_ref = ste_pre_grant_map_ref,
    .fail_grant_map_ref = NULL,
    .pre_grant_setup = ste_pre_grant_setup,
    .fail_grant_setup = NULL,
    .sharing = ste_sharing,
};

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */