ia64/xen-unstable: xen/acm/acm_simple_type_enforcement_hooks.c @ 5860:d63b100b327a

changeset:  Fix restart/poweroff properly. From aq.
author:     kaf24@firebug.cl.cam.ac.uk
date:       Tue Jul 26 10:09:06 2005 +0000 (2005-07-26)
parents:    8ad10be47849
children:   d18f732c0a5f e173a853dc46
/****************************************************************
 * acm_simple_type_enforcement_hooks.c
 *
 * Copyright (C) 2005 IBM Corporation
 *
 * Author:
 * Reiner Sailer <sailer@watson.ibm.com>
 *
 * Contributors:
 * Stefan Berger <stefanb@watson.ibm.com>
 *   support for network-order binary policies
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * sHype Simple Type Enforcement for Xen
 * STE controls which domains can set up sharing
 * (event channels right now) with which other domains. Hooks
 * are defined and called throughout Xen when domains bind to
 * shared resources (set up event channels); a domain is allowed
 * to set up sharing with another domain if and only if both domains
 * share at least one common type.
 *
 */
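
/*
 * Illustration (hypothetical example, not mandated by the policy tools):
 * the STE policy is a flat types-per-ssidref bitmap, stored row-major and
 * indexed as ssidrefs[ssidref * max_types + type], as used by
 * have_common_type() below. A policy with max_types = 2 and
 * max_ssidrefs = 3 might look like:
 *
 *                 type0  type1
 *   ssidref 0       0      0     (no types: shares with nobody)
 *   ssidref 1       1      1
 *   ssidref 2       0      1
 *
 * Here ssidref 1 and ssidref 2 share type1 and may set up event channels
 * or grant mappings with each other; ssidref 0 may not share with anyone.
 */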
#include <xen/lib.h>
#include <asm/types.h>
#include <asm/current.h>
#include <acm/acm_hooks.h>
#include <asm/atomic.h>
#include <acm/acm_endian.h>

/* local cache structures for STE policy */
struct ste_binary_policy ste_bin_pol;
static inline int have_common_type(ssidref_t ref1, ssidref_t ref2) {
    int i;
    for (i = 0; i < ste_bin_pol.max_types; i++)
        if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] &&
             ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
            printkd("%s: common type #%02x.\n", __func__, i);
            return 1;
        }
    return 0;
}
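
/*
 * Note on locking: have_common_type() reads ste_bin_pol without taking
 * acm_bin_pol_rwlock itself; callers are expected to hold the policy lock
 * already. share_common_type() below takes the read lock around its call,
 * and the init/set-policy paths run with the write lock held (see the
 * "policy write-locked already" comments below).
 */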
/* Helper function: return = (subj and obj share a common type) */
static int share_common_type(struct domain *subj, struct domain *obj)
{
    ssidref_t ref_s, ref_o;
    int ret;

    if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
        return 0;
    read_lock(&acm_bin_pol_rwlock);
    /* lookup the policy-local ssids */
    ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
              (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
    ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
              (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
    /* check whether subj and obj share a common ste type */
    ret = have_common_type(ref_s, ref_o);
    read_unlock(&acm_bin_pol_rwlock);
    return ret;
}
/*
 * Initializing STE policy (will be filled by the policy partition
 * using the setpolicy command)
 */
int acm_init_ste_policy(void)
{
    /* minimal startup policy; policy write-locked already */
    ste_bin_pol.max_types = 1;
    ste_bin_pol.max_ssidrefs = 2;
    ste_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, 2);
    if (ste_bin_pol.ssidrefs == NULL)
        return ACM_INIT_SSID_ERROR;
    /* clear both entries (only after the allocation is known to have succeeded) */
    memset(ste_bin_pol.ssidrefs, 0, 2 * sizeof(domaintype_t));

    /* initialize state so that dom0 can start up and communicate with itself */
    ste_bin_pol.ssidrefs[1] = 1;

    /* init stats */
    atomic_set(&(ste_bin_pol.ec_eval_count), 0);
    atomic_set(&(ste_bin_pol.ec_denied_count), 0);
    atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
    atomic_set(&(ste_bin_pol.gt_eval_count), 0);
    atomic_set(&(ste_bin_pol.gt_denied_count), 0);
    atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
    return ACM_OK;
}
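
/*
 * Illustration: the startup policy above is a 2x1 matrix. ssidref 0
 * (ACM_DEFAULT_LOCAL_SSID, i.e. "unset") carries no types, while
 * ssidref 1 carries the single type 0:
 *
 *                 type0
 *   ssidref 0       0
 *   ssidref 1       1
 *
 * So only domains labeled with ssidref 1 (per the comment above, dom0 at
 * boot) can set up sharing, and only with each other, until a real policy
 * is loaded via ste_set_policy().
 */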
/* ste initialization function hooks */
static int
ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
{
    int i;
    struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);
    traceprintk("%s.\n", __func__);

    if (ste_ssidp == NULL)
        return ACM_INIT_SSID_ERROR;

    /* get policy-local ssid reference */
    ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
    if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
        (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
        printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
                __func__, ste_ssidp->ste_ssidref);
        xfree(ste_ssidp);
        return ACM_INIT_SSID_ERROR;
    }
    /* clean ste cache */
    for (i = 0; i < ACM_TE_CACHE_SIZE; i++)
        ste_ssidp->ste_cache[i].valid = FREE;

    (*ste_ssid) = ste_ssidp;
    printkd("%s: determined ste_ssidref to %x.\n",
            __func__, ste_ssidp->ste_ssidref);
    return ACM_OK;
}
static void
ste_free_domain_ssid(void *ste_ssid)
{
    traceprintk("%s.\n", __func__);
    if (ste_ssid != NULL)
        xfree(ste_ssid);
    return;
}
/* dump type enforcement policy; policy read-locked already */
static int
ste_dump_policy(u8 *buf, u16 buf_size) {
    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
    int ret = 0;

    ret = sizeof(struct acm_ste_policy_buffer) +
        sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
    /* don't overflow the caller's buffer */
    if (buf_size < ret)
        return -ENOMEM;

    ste_buf->ste_max_types = htons(ste_bin_pol.max_types);
    ste_buf->ste_max_ssidrefs = htons(ste_bin_pol.max_ssidrefs);
    ste_buf->policy_code = htons(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
    ste_buf->ste_ssid_offset = htons(sizeof(struct acm_ste_policy_buffer));

    /* now copy the ssidrefs array over */
    arrcpy(buf + ntohs(ste_buf->ste_ssid_offset),
           ste_bin_pol.ssidrefs,
           sizeof(domaintype_t),
           ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);

    return ret;
}
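
/*
 * Layout of the dump produced above: a struct acm_ste_policy_buffer
 * header with all fields in network byte order, followed at
 * ste_ssid_offset by the max_ssidrefs * max_types array of domaintype_t
 * entries; the return value is the total number of bytes written.
 * ste_set_policy() below consumes this same layout.
 */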
/* ste_init_state is called when a policy is changed to detect violations (return != 0).
 * From a security point of view, we simulate that all running domains are re-started and
 * all sharing decisions are replayed against the new policy to detect violations of the
 * current sharing behavior (right now: event channels; future: also grant tables).
 */
static int
ste_init_state(struct acm_ste_policy_buffer *ste_buf, domaintype_t *ssidrefs)
{
    int violation = 1;
    struct ste_ssid *ste_ssid, *ste_rssid;
    ssidref_t ste_ssidref, ste_rssidref;
    struct domain **pd, *rdom;
    domid_t rdomid;
    grant_entry_t sha_copy;
    int port, i;

    read_lock(&domlist_lock); /* go by domain? or directly by global? event/grant list */
    /* go through all domains and adjust policy as if this domain was started now */
    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                             (struct acm_ssid_domain *)(*pd)->ssid);
        ste_ssidref = ste_ssid->ste_ssidref;
        traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
                    __func__, (*pd)->domain_id, ste_ssidref);
        /* a) check for event channel conflicts */
        for (port = 0; port < NR_EVTCHN_BUCKETS; port++) {
            spin_lock(&(*pd)->evtchn_lock);
            if ((*pd)->evtchn[port] == NULL) {
                spin_unlock(&(*pd)->evtchn_lock);
                continue;
            }
            if ((*pd)->evtchn[port]->state == ECS_INTERDOMAIN) {
                rdom = (*pd)->evtchn[port]->u.interdomain.remote_dom;
                rdomid = rdom->domain_id;
                /* rdom now has the remote domain */
                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                      (struct acm_ssid_domain *)(rdom->ssid));
                ste_rssidref = ste_rssid->ste_ssidref;
            } else if ((*pd)->evtchn[port]->state == ECS_UNBOUND) {
                rdomid = (*pd)->evtchn[port]->u.unbound.remote_domid;
                if ((rdom = find_domain_by_id(rdomid)) == NULL) {
                    printk("%s: Error finding domain to id %x!\n", __func__, rdomid);
                    spin_unlock(&(*pd)->evtchn_lock);
                    goto out;
                }
                /* rdom now has the remote domain */
                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                      (struct acm_ssid_domain *)(rdom->ssid));
                ste_rssidref = ste_rssid->ste_ssidref;
                put_domain(rdom);
            } else {
                spin_unlock(&(*pd)->evtchn_lock);
                continue; /* port unused */
            }
            spin_unlock(&(*pd)->evtchn_lock);

            traceprintk("%s: eventch: domain %x (ssidref %x) --> domain %x (rssidref %x) used (port %x).\n",
                        __func__, (*pd)->domain_id, ste_ssidref, rdomid, ste_rssidref, port);
            /* check whether subj->ssid and obj->ssid share a common type */
            if (!have_common_type(ste_ssidref, ste_rssidref)) {
                printkd("%s: Policy violation in event channel domain %x -> domain %x.\n",
                        __func__, (*pd)->domain_id, rdomid);
                goto out;
            }
        }
        /* b) check for grant table conflicts on shared pages */
        if ((*pd)->grant_table->shared == NULL) {
            printkd("%s: Grant ... sharing for domain %x not setup!\n", __func__, (*pd)->domain_id);
            continue;
        }
        for ( i = 0; i < NR_GRANT_ENTRIES; i++ ) {
            sha_copy = (*pd)->grant_table->shared[i];
            if ( sha_copy.flags ) {
                printkd("%s: grant dom (%hu) SHARED (%d) flags:(%hx) dom:(%hu) frame:(%lx)\n",
                        __func__, (*pd)->domain_id, i, sha_copy.flags, sha_copy.domid,
                        (unsigned long)sha_copy.frame);
                rdomid = sha_copy.domid;
                if ((rdom = find_domain_by_id(rdomid)) == NULL) {
                    printkd("%s: domain not found ERROR!\n", __func__);
                    goto out;
                }
                /* rdom now has the remote domain */
                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                                      (struct acm_ssid_domain *)(rdom->ssid));
                ste_rssidref = ste_rssid->ste_ssidref;
                put_domain(rdom);
                if (!have_common_type(ste_ssidref, ste_rssidref)) {
                    printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
                            __func__, (*pd)->domain_id, rdomid);
                    goto out;
                }
            }
        }
    }
    violation = 0;
 out:
    read_unlock(&domlist_lock);
    return violation;
    /* returning "violation != 0" means that existing sharing between domains would not
     * have been allowed if the new policy had been enforced before the sharing; for STE,
     * this means that at least two domains have established sharing through event channels
     * or grant tables but no longer share a common type in the typesets referenced by
     * their ssidrefs */
}
/* set new policy; policy write-locked already */
static int
ste_set_policy(u8 *buf, u16 buf_size)
{
    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
    void *ssidrefsbuf;
    struct ste_ssid *ste_ssid;
    struct domain **pd;
    int i;

    /* convert endianness of the policy header */
    ste_buf->policy_code = ntohs(ste_buf->policy_code);
    ste_buf->ste_max_types = ntohs(ste_buf->ste_max_types);
    ste_buf->ste_max_ssidrefs = ntohs(ste_buf->ste_max_ssidrefs);
    ste_buf->ste_ssid_offset = ntohs(ste_buf->ste_ssid_offset);

    /* 1. create and copy-in new ssidrefs buffer */
    ssidrefsbuf = xmalloc_array(u8, sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
    if (ssidrefsbuf == NULL) {
        return -ENOMEM;
    }
    if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
        goto error_free;

    arrcpy(ssidrefsbuf,
           buf + ste_buf->ste_ssid_offset,
           sizeof(domaintype_t),
           ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);

    /* 2. now re-calculate sharing decisions based on running domains;
     *    this can fail if the new policy conflicts with the sharing of running domains
     *    now: reject the violating new policy; future: adjust sharing by revoking it */
    if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
        printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
        goto error_free; /* new policy conflicts with sharing of running domains */
    }
    /* 3. replace old policy (activate new policy) */
    ste_bin_pol.max_types = ste_buf->ste_max_types;
    ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
    if (ste_bin_pol.ssidrefs)
        xfree(ste_bin_pol.ssidrefs);
    ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;

    /* clear all ste caches */
    read_lock(&domlist_lock);
    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                             (struct acm_ssid_domain *)(*pd)->ssid);
        for (i = 0; i < ACM_TE_CACHE_SIZE; i++)
            ste_ssid->ste_cache[i].valid = FREE;
    }
    read_unlock(&domlist_lock);
    return ACM_OK;

 error_free:
    printk("%s: ERROR setting policy.\n", __func__);
    if (ssidrefsbuf != NULL) xfree(ssidrefsbuf);
    return -EFAULT;
}
static int
ste_dump_stats(u8 *buf, u16 buf_len)
{
    struct acm_ste_stats_buffer stats;

#ifdef ACM_DEBUG
    int i;
    struct ste_ssid *ste_ssid;
    struct domain **pd;

    printk("ste: Decision caches:\n");
    /* walk all domains and print their decision caches */
    read_lock(&domlist_lock); /* go by domain? or directly by global? event/grant list */
    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
        printk("ste: Cache Domain %02x.\n", (*pd)->domain_id);
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                             (struct acm_ssid_domain *)(*pd)->ssid);
        for (i = 0; i < ACM_TE_CACHE_SIZE; i++)
            printk("\t\tcache[%02x] = %s, domid=%x.\n", i,
                   (ste_ssid->ste_cache[i].valid == VALID) ?
                   "VALID" : "FREE",
                   (ste_ssid->ste_cache[i].valid == VALID) ?
                   ste_ssid->ste_cache[i].id : 0xffffffff);
    }
    read_unlock(&domlist_lock);
    /* print hook statistics */
    printk("STE-Policy Security Hook Statistics:\n");
    printk("ste: event_channel eval_count = %x\n", atomic_read(&(ste_bin_pol.ec_eval_count)));
    printk("ste: event_channel denied_count = %x\n", atomic_read(&(ste_bin_pol.ec_denied_count)));
    printk("ste: event_channel cache_hit_count = %x\n", atomic_read(&(ste_bin_pol.ec_cachehit_count)));
    printk("ste:\n");
    printk("ste: grant_table eval_count = %x\n", atomic_read(&(ste_bin_pol.gt_eval_count)));
    printk("ste: grant_table denied_count = %x\n", atomic_read(&(ste_bin_pol.gt_denied_count)));
    printk("ste: grant_table cache_hit_count = %x\n", atomic_read(&(ste_bin_pol.gt_cachehit_count)));
#endif

    if (buf_len < sizeof(struct acm_ste_stats_buffer))
        return -ENOMEM;

    /* now send the hook counts to user space */
    stats.ec_eval_count = htonl(atomic_read(&ste_bin_pol.ec_eval_count));
    stats.gt_eval_count = htonl(atomic_read(&ste_bin_pol.gt_eval_count));
    stats.ec_denied_count = htonl(atomic_read(&ste_bin_pol.ec_denied_count));
    stats.gt_denied_count = htonl(atomic_read(&ste_bin_pol.gt_denied_count));
    stats.ec_cachehit_count = htonl(atomic_read(&ste_bin_pol.ec_cachehit_count));
    stats.gt_cachehit_count = htonl(atomic_read(&ste_bin_pol.gt_cachehit_count));
    memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
    return sizeof(struct acm_ste_stats_buffer);
}
/* we need to go through this before calling the hooks,
 * returns 1 == cache hit */
static inline int
check_cache(struct domain *dom, domid_t rdom) {
    struct ste_ssid *ste_ssid;
    int i;

    printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);
    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                         (struct acm_ssid_domain *)(dom)->ssid);

    for (i = 0; i < ACM_TE_CACHE_SIZE; i++) {
        if ((ste_ssid->ste_cache[i].valid == VALID) &&
            (ste_ssid->ste_cache[i].id == rdom)) {
            printkd("cache hit (entry %x, id=%x)!\n", i, ste_ssid->ste_cache[i].id);
            return 1;
        }
    }
    return 0;
}
/* we only get here if there is NO entry yet; no duplication check! */
static inline void
cache_result(struct domain *subj, struct domain *obj) {
    struct ste_ssid *ste_ssid;
    int i;
    printkd("caching from doms: %x --> %x.\n", subj->domain_id, obj->domain_id);
    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                         (struct acm_ssid_domain *)(subj)->ssid);
    for (i = 0; i < ACM_TE_CACHE_SIZE; i++)
        if (ste_ssid->ste_cache[i].valid == FREE)
            break;
    if (i < ACM_TE_CACHE_SIZE) {
        ste_ssid->ste_cache[i].valid = VALID;
        ste_ssid->ste_cache[i].id = obj->domain_id;
    } else
        printk("Cache of dom %x is full!\n", subj->domain_id);
}
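
/*
 * Note: the decision cache is one-directional and per subject: a hit in
 * check_cache() means this subject was previously allowed to share with
 * that object's domid. The reverse direction is evaluated (and cached)
 * separately when the other domain acts as the subject, and
 * clean_id_from_cache() below flushes a domid from every domain's cache
 * when that domain is destroyed.
 */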
/* deletes entries for domain 'id' from all caches (re-use) */
static inline void
clean_id_from_cache(domid_t id)
{
    struct ste_ssid *ste_ssid;
    int i;
    struct domain **pd;

    printkd("deleting cache for dom %x.\n", id);

    read_lock(&domlist_lock); /* look through caches of all domains */
    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
                             (struct acm_ssid_domain *)(*pd)->ssid);
        for (i = 0; i < ACM_TE_CACHE_SIZE; i++)
            if ((ste_ssid->ste_cache[i].valid == VALID) &&
                (ste_ssid->ste_cache[i].id == id))   /* compare, not assign */
                ste_ssid->ste_cache[i].valid = FREE;
    }
    read_unlock(&domlist_lock);
}
/***************************
 * Authorization functions
 ***************************/

static int
ste_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
{
    /* check for ssidref in range for policy */
    ssidref_t ste_ssidref;
    traceprintk("%s.\n", __func__);

    read_lock(&acm_bin_pol_rwlock);
    ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
    if (ste_ssidref == ACM_DEFAULT_LOCAL_SSID) {
        printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", __func__);
        read_unlock(&acm_bin_pol_rwlock);
        return ACM_ACCESS_DENIED; /* catching and indicating config error */
    }
    if (ste_ssidref >= ste_bin_pol.max_ssidrefs) {
        printk("%s: ERROR ste_ssidref > max(%x).\n",
               __func__, ste_bin_pol.max_ssidrefs-1);
        read_unlock(&acm_bin_pol_rwlock);
        return ACM_ACCESS_DENIED;
    }
    read_unlock(&acm_bin_pol_rwlock);
    return ACM_ACCESS_PERMITTED;
}

static void
ste_post_domain_destroy(void *subject_ssid, domid_t id)
{
    /* clean all cache entries for the destroyed domain (its id might be re-used) */
    clean_id_from_cache(id);
}
/* -------- EVENTCHANNEL OPERATIONS -----------*/
static int
ste_pre_eventchannel_unbound(domid_t id) {
    struct domain *subj, *obj;
    int ret;
    traceprintk("%s: dom%x-->dom%x.\n",
                __func__, current->domain->domain_id, id);

    if (check_cache(current->domain, id)) {
        atomic_inc(&ste_bin_pol.ec_cachehit_count);
        return ACM_ACCESS_PERMITTED;
    }
    atomic_inc(&ste_bin_pol.ec_eval_count);
    subj = current->domain;
    obj = find_domain_by_id(id);

    if (share_common_type(subj, obj)) {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    } else {
        atomic_inc(&ste_bin_pol.ec_denied_count);
        ret = ACM_ACCESS_DENIED;
    }
    if (obj != NULL)
        put_domain(obj);
    return ret;
}

static int
ste_pre_eventchannel_interdomain(domid_t id1, domid_t id2)
{
    struct domain *subj, *obj;
    int ret;
    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
                (id2 == DOMID_SELF) ? current->domain->domain_id : id2);

    /* the following is a bit longer but ensures that we
     * "put" only domains that we were "find"-ing
     */
    if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
    if (id2 == DOMID_SELF) id2 = current->domain->domain_id;

    subj = find_domain_by_id(id1);
    obj = find_domain_by_id(id2);
    if ((subj == NULL) || (obj == NULL)) {
        ret = ACM_ACCESS_DENIED;
        goto out;
    }
    /* cache check is late, but evtchn setup is not on the performance-critical path */
    if (check_cache(subj, obj->domain_id)) {
        atomic_inc(&ste_bin_pol.ec_cachehit_count);
        ret = ACM_ACCESS_PERMITTED;
        goto out;
    }
    atomic_inc(&ste_bin_pol.ec_eval_count);

    if (share_common_type(subj, obj)) {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    } else {
        atomic_inc(&ste_bin_pol.ec_denied_count);
        ret = ACM_ACCESS_DENIED;
    }
 out:
    if (obj != NULL)
        put_domain(obj);
    if (subj != NULL)
        put_domain(subj);
    return ret;
}
/* -------- SHARED MEMORY OPERATIONS -----------*/

static int
ste_pre_grant_map_ref (domid_t id) {
    struct domain *obj, *subj;
    int ret;
    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                current->domain->domain_id, id);

    if (check_cache(current->domain, id)) {
        atomic_inc(&ste_bin_pol.gt_cachehit_count);
        return ACM_ACCESS_PERMITTED;
    }
    atomic_inc(&ste_bin_pol.gt_eval_count);
    subj = current->domain;
    obj = find_domain_by_id(id);

    if (share_common_type(subj, obj)) {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    } else {
        atomic_inc(&ste_bin_pol.gt_denied_count);
        printkd("%s: ACCESS DENIED!\n", __func__);
        ret = ACM_ACCESS_DENIED;
    }
    if (obj != NULL)
        put_domain(obj);
    return ret;
}

/* since setting up grant tables involves some implicit information
 * flow from the creating domain to the domain that is set up, we
 * check types in addition to the general authorization */
static int
ste_pre_grant_setup (domid_t id) {
    struct domain *obj, *subj;
    int ret;
    traceprintk("%s: dom%x-->dom%x.\n", __func__,
                current->domain->domain_id, id);

    if (check_cache(current->domain, id)) {
        atomic_inc(&ste_bin_pol.gt_cachehit_count);
        return ACM_ACCESS_PERMITTED;
    }
    atomic_inc(&ste_bin_pol.gt_eval_count);
    /* a) check authorization (eventually use specific capabilities) */
    if (!IS_PRIV(current->domain)) {
        printk("%s: Grant table management authorization denied ERROR!\n", __func__);
        return ACM_ACCESS_DENIED;
    }
    /* b) check types */
    subj = current->domain;
    obj = find_domain_by_id(id);

    if (share_common_type(subj, obj)) {
        cache_result(subj, obj);
        ret = ACM_ACCESS_PERMITTED;
    } else {
        atomic_inc(&ste_bin_pol.gt_denied_count);
        ret = ACM_ACCESS_DENIED;
    }
    if (obj != NULL)
        put_domain(obj);
    return ret;
}
/* now define the hook structure similarly to LSM */
struct acm_operations acm_simple_type_enforcement_ops = {
    /* policy management services */
    .init_domain_ssid       = ste_init_domain_ssid,
    .free_domain_ssid       = ste_free_domain_ssid,
    .dump_binary_policy     = ste_dump_policy,
    .set_binary_policy      = ste_set_policy,
    .dump_statistics        = ste_dump_stats,
    /* domain management control hooks */
    .pre_domain_create      = ste_pre_domain_create,
    .post_domain_create     = NULL,
    .fail_domain_create     = NULL,
    .post_domain_destroy    = ste_post_domain_destroy,
    /* event channel control hooks */
    .pre_eventchannel_unbound      = ste_pre_eventchannel_unbound,
    .fail_eventchannel_unbound     = NULL,
    .pre_eventchannel_interdomain  = ste_pre_eventchannel_interdomain,
    .fail_eventchannel_interdomain = NULL,
    /* grant table control hooks */
    .pre_grant_map_ref      = ste_pre_grant_map_ref,
    .fail_grant_map_ref     = NULL,
    .pre_grant_setup        = ste_pre_grant_setup,
    .fail_grant_setup       = NULL,
};