If needed, change the maximum number of colors with
``CONFIG_LLC_COLORS_ORDER=<n>``.
+If needed, change the amount of memory reserved for the buddy allocator with
+``CONFIG_BUDDY_ALLOCATOR_SIZE=<n>`` (value in MiB).
+
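+For example, a hypothetical ``.config`` fragment reserving 64 MiB for the
+buddy allocator and raising the maximum number of colors to 128 (order 7,
+assuming the order expresses a power of two) could be::
+
+    CONFIG_LLC_COLORING=y
+    CONFIG_LLC_COLORS_ORDER=7
+    CONFIG_BUDDY_ALLOCATOR_SIZE=64
+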
Runtime configuration is done via `Command line parameters`_.
For DomUs follow `DomUs configuration`_.
+----------------------+-------------------------------+
| ``dom0-llc-colors`` | Dom0 color configuration |
+----------------------+-------------------------------+
+| ``buddy-alloc-size`` | Buddy allocator reserved size |
++----------------------+-------------------------------+
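+A hypothetical Xen command line using both parameters (the color list format
+is described in the next section) could be::
+
+    dom0-llc-colors=0-3 buddy-alloc-size=64M
+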
Colors selection format
***********************
**Note:** If no color configuration is provided for a domain, the default one,
which corresponds to all available colors, is used instead.
+Colored allocator and buddy allocator
+*************************************
+
+The colored allocator distributes pages based on the color configuration of
+each domain, so that every domain only gets pages of its own colors.
+The colored allocator is meant as an alternative to the buddy allocator,
+because its allocation policy is by definition incompatible with the generic
+one. Since the Xen heap is not colored yet, the two allocators must coexist
+and some memory has to be left to the buddy one. The buddy memory
+reservation is configured via Kconfig or via the command line (see the
+examples above).
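+As a rough sketch of the resulting coexistence (simplified from the
+``alloc_domheap_pages()`` hunk later in this patch)::
+
+    if ( d && llc_coloring_enabled )
+        pg = alloc_color_heap_page(memflags, d); /* colored allocator */
+    else
+        pg = alloc_heap_pages(...);              /* buddy allocator */
+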
+
Known issues and limitations
****************************
allocated to the domain. This isn't possible when LLC coloring is enabled,
because that memory can't be guaranteed to use only colors assigned to the
domain.
+
+Cache coloring is intended only for embedded systems
+####################################################
+
+The current implementation aims to satisfy the need for predictability in
+embedded systems with small amounts of memory to be managed in a colored way.
+Given that, some shortcuts are taken in the implementation. Expect worse
+performance on larger systems.
+
+Colored allocator can only make use of order-0 pages
+####################################################
+
+The cache coloring technique relies on memory mappings and on the smallest
+mapping granularity to achieve the maximum number of colors (cache partitions)
+possible. This granularity is what is normally called a page and, in Xen
+terminology, the order-0 page is the smallest one. The fairly simple colored
+allocator currently implemented makes use only of such pages.
+It must be said that a more complex one could, in theory, adopt higher order
+pages if the colors selection contained adjacent colors. Two adjacent colors,
+for example, can be represented by an order-1 page, four colors correspond to
+an order-2 page, etc.
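+As an illustration, assuming a mask-based page-to-color mapping with a
+power-of-two number of colors (a sketch, not the exact Xen helper)::
+
+    color(mfn) = mfn & (nr_colors - 1)
+
+an order-1 page covers the two frames ``2k`` and ``2k + 1``, i.e. exactly two
+adjacent colors, an order-2 page covers four adjacent colors, and so on.
+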
#include <xen/softirq.h>
#include <xen/spinlock.h>
#include <xen/vm_event.h>
+#include <xen/xvmalloc.h>
#include <asm/flushtlb.h>
#include <asm/page.h>
#define PGC_static 0
#endif
-#define PGC_no_buddy_merge PGC_static
+#ifndef PGC_colored
+#define PGC_colored 0
+#endif
+
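+/* Flags that prevent a page from being merged with its buddies when freed */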
+#define PGC_no_buddy_merge (PGC_static | PGC_colored)
/*
* Flags that are preserved in assign_pages() (and only there)
*/
-#define PGC_preserved (PGC_extra | PGC_static)
+#define PGC_preserved (PGC_extra | PGC_static | PGC_colored)
#ifndef PGT_TYPE_INFO_INITIALIZER
#define PGT_TYPE_INFO_INITIALIZER 0
return pg_offlined;
}
+static void free_color_heap_page(struct page_info *pg, bool need_scrub);
+
/* Free 2^@order set of pages. */
static void free_heap_pages(
struct page_info *pg, unsigned int order, bool need_scrub)
pg[i].count_info |= PGC_need_scrub;
poison_one_page(&pg[i]);
}
+
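+    /* Colored pages are order-0 only and are returned to the colored heap. */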
+ if ( pg->count_info & PGC_colored )
+ {
+ ASSERT(order == 0);
+
+ free_color_heap_page(pg, need_scrub);
+ spin_unlock(&heap_lock);
+ return;
+ }
}
avail[node][zone] += 1 << order;
return free_pages;
}
+/*************************
+ * COLORED SIDE-ALLOCATOR
+ *
+ * Pages are grouped by LLC color in lists which are globally referred to as the
+ * color heap. Lists are populated in end_boot_allocator().
+ * After initialization there will be N lists where N is the number of
+ * available colors on the platform.
+ */
+static struct page_list_head *__ro_after_init _color_heap;
+#define color_heap(color) (&_color_heap[color])
+
+static unsigned long *__ro_after_init free_colored_pages;
+
+#ifdef CONFIG_LLC_COLORING
+#define domain_num_llc_colors(d) ((d)->num_llc_colors)
+#define domain_llc_color(d, i) ((d)->llc_colors[i])
+
+/* Memory reserved for the buddy allocator to work alongside the colored one */
+static unsigned long __initdata buddy_alloc_size =
+ MB(CONFIG_BUDDY_ALLOCATOR_SIZE);
+size_param("buddy-alloc-size", buddy_alloc_size);
+#else
+#define domain_num_llc_colors(d) 0
+#define domain_llc_color(d, i) 0
+#endif
+
+static void free_color_heap_page(struct page_info *pg, bool need_scrub)
+{
+    unsigned int color;
+
+    color = page_to_llc_color(pg);
+    free_colored_pages[color]++;
+
+    /* Record pending scrubs; idempotent if the caller already did so. */
+    if ( need_scrub )
+    {
+        pg->count_info |= PGC_need_scrub;
+        poison_one_page(pg);
+    }
+
+    /*
+     * Head insertion allows re-using cache-hot pages in configurations without
+     * sharing of colors.
+     */
+    page_list_add(pg, color_heap(color));
+}
+
+static struct page_info *alloc_color_heap_page(unsigned int memflags,
+ const struct domain *d)
+{
+ struct page_info *pg = NULL;
+ unsigned int i, color = 0;
+ unsigned long max = 0;
+ bool need_tlbflush = false;
+ uint32_t tlbflush_timestamp = 0;
+ bool need_scrub;
+
+ if ( memflags & ~(MEMF_no_refcount | MEMF_no_owner | MEMF_no_tlbflush |
+ MEMF_no_icache_flush | MEMF_no_scrub) )
+ return NULL;
+
+ spin_lock(&heap_lock);
+
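+    /* Pick, among the domain's colors, the one with the most free pages. */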
+ for ( i = 0; i < domain_num_llc_colors(d); i++ )
+ {
+ unsigned long free = free_colored_pages[domain_llc_color(d, i)];
+
+ if ( free > max )
+ {
+ color = domain_llc_color(d, i);
+ pg = page_list_first(color_heap(color));
+ max = free;
+ }
+ }
+
+ if ( !pg )
+ {
+ spin_unlock(&heap_lock);
+ return NULL;
+ }
+
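+    /* Latch the scrub flag before resetting the page state below. */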
+ need_scrub = pg->count_info & PGC_need_scrub;
+ pg->count_info = PGC_state_inuse | (pg->count_info & PGC_colored);
+ free_colored_pages[color]--;
+ page_list_del(pg, color_heap(color));
+
+ if ( !(memflags & MEMF_no_tlbflush) )
+ accumulate_tlbflush(&need_tlbflush, pg, &tlbflush_timestamp);
+
+ init_free_page_fields(pg);
+
+ spin_unlock(&heap_lock);
+
+ if ( !(memflags & MEMF_no_scrub) )
+ {
+ if ( need_scrub )
+ scrub_one_page(pg);
+ else
+ check_one_page(pg);
+ }
+
+ if ( need_tlbflush )
+ filtered_flush_tlb_mask(tlbflush_timestamp);
+
+ flush_page_to_ram(mfn_x(page_to_mfn(pg)),
+ !(memflags & MEMF_no_icache_flush));
+
+ return pg;
+}
+
+static void __init init_color_heap_pages(struct page_info *pg,
+ unsigned long nr_pages)
+{
+ unsigned long i;
+ bool need_scrub = opt_bootscrub == BOOTSCRUB_IDLE;
+
+#ifdef CONFIG_LLC_COLORING
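+    /* Hand the configured reservation over to the buddy allocator first. */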
+ if ( buddy_alloc_size >= PAGE_SIZE )
+ {
+ unsigned long buddy_pages = min(PFN_DOWN(buddy_alloc_size), nr_pages);
+
+ init_heap_pages(pg, buddy_pages);
+ nr_pages -= buddy_pages;
+ buddy_alloc_size -= buddy_pages << PAGE_SHIFT;
+ pg += buddy_pages;
+ }
+#endif
+
+ if ( !_color_heap )
+ {
+ unsigned int max_nr_colors = get_max_nr_llc_colors();
+
+ _color_heap = xvmalloc_array(struct page_list_head, max_nr_colors);
+ free_colored_pages = xvzalloc_array(unsigned long, max_nr_colors);
+ if ( !_color_heap || !free_colored_pages )
+ panic("Can't allocate colored heap. Buddy reserved size is too low");
+
+ for ( i = 0; i < max_nr_colors; i++ )
+ INIT_PAGE_LIST_HEAD(color_heap(i));
+ }
+
+ for ( i = 0; i < nr_pages; i++ )
+ {
+ pg[i].count_info = PGC_colored;
+ free_color_heap_page(&pg[i], need_scrub);
+ }
+}
+
+static void dump_color_heap(void)
+{
+ unsigned int color;
+
+ printk("Dumping color heap info\n");
+ for ( color = 0; color < get_max_nr_llc_colors(); color++ )
+ if ( free_colored_pages[color] > 0 )
+ printk("Color heap[%u]: %lu pages\n",
+ color, free_colored_pages[color]);
+}
+
void __init end_boot_allocator(void)
{
unsigned int i;
for ( i = nr_bootmem_regions; i-- > 0; )
{
struct bootmem_region *r = &bootmem_region_list[i];
- if ( r->s < r->e )
+
+ if ( r->s >= r->e )
+ continue;
+
+ if ( llc_coloring_enabled )
+ init_color_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
+ else
init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
}
nr_bootmem_regions = 0;
if ( memflags & MEMF_no_owner )
memflags |= MEMF_no_refcount;
- if ( !dma_bitsize )
+    /* Only allocations for domains can be served by the colored allocator */
+ if ( d && llc_coloring_enabled )
+ {
+        /* Colored allocations are only supported at order 0 */
+ if ( order || (pg = alloc_color_heap_page(memflags, d)) == NULL )
+ return NULL;
+ }
+ else if ( !dma_bitsize )
memflags &= ~MEMF_no_dma;
else if ( (dma_zone = bits_to_zone(dma_bitsize)) < zone_hi )
pg = alloc_heap_pages(dma_zone + 1, zone_hi, order, memflags, d);
continue;
printk("Node %d has %lu unscrubbed pages\n", i, node_need_scrub[i]);
}
+
+ if ( llc_coloring_enabled )
+ dump_color_heap();
}
static __init int cf_check register_heap_trigger(void)