direct-io.hg

view linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c @ 7763:cc1c250e672d

Max xen skbuff allocation is now 64KB. kmem caches only
get populated with objects on first allocation, so if large
caches end up being unused, they don't actually cost us
very much.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Nov 11 11:01:32 2005 +0100 (2005-11-11)
parents 356c175366a1
children
line source
2 #include <linux/config.h>
3 #include <linux/module.h>
4 #include <linux/version.h>
5 #include <linux/kernel.h>
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/netdevice.h>
9 #include <linux/inetdevice.h>
10 #include <linux/etherdevice.h>
11 #include <linux/skbuff.h>
12 #include <linux/init.h>
13 #include <asm/io.h>
14 #include <asm/page.h>
15 #include <asm/hypervisor.h>
/* Referenced in netback.c. */
/*static*/ kmem_cache_t *skbuff_cachep;

/* Largest supported allocation order: 2^4 pages (64KB with 4KB pages). */
#define MAX_SKBUFF_ORDER 4
/* One cache per allocation order, 0..MAX_SKBUFF_ORDER; filled in skbuff_init(). */
static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];

/*
 * Sub-page caches for small skbs, sizes in ascending order (alloc_skb()
 * relies on the ordering when scanning for the first large-enough size).
 * The cachep fields are populated in skbuff_init().
 */
static struct {
	int size;
	kmem_cache_t *cachep;
} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
28 struct sk_buff *alloc_skb(unsigned int length, int gfp_mask)
29 {
30 int order, i;
31 kmem_cache_t *cachep;
33 length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);
35 if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
36 for (i = 0; skbuff_small[i].size < length; i++)
37 continue;
38 cachep = skbuff_small[i].cachep;
39 } else {
40 order = get_order(length);
41 if (order > MAX_SKBUFF_ORDER) {
42 printk(KERN_ALERT "Attempt to allocate order %d "
43 "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
44 return NULL;
45 }
46 cachep = skbuff_order_cachep[order];
47 }
49 length -= sizeof(struct skb_shared_info);
51 return alloc_skb_from_cache(cachep, length, gfp_mask);
52 }
54 struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
55 {
56 struct sk_buff *skb;
57 int order;
59 length = SKB_DATA_ALIGN(length + 16);
60 order = get_order(length + sizeof(struct skb_shared_info));
61 if (order > MAX_SKBUFF_ORDER) {
62 printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
63 "Increase MAX_SKBUFF_ORDER.\n", order);
64 return NULL;
65 }
67 skb = alloc_skb_from_cache(
68 skbuff_order_cachep[order], length, gfp_mask);
69 if (skb != NULL)
70 skb_reserve(skb, 16);
72 return skb;
73 }
75 static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
76 {
77 int order = 0;
79 while (skbuff_order_cachep[order] != cachep)
80 order++;
82 /* Do our best to allocate contiguous memory but fall back to IOMMU. */
83 if (order != 0)
84 (void)xen_create_contiguous_region(
85 (unsigned long)buf, order, 0);
87 scrub_pages(buf, 1 << order);
88 }
90 static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
91 {
92 int order = 0;
94 while (skbuff_order_cachep[order] != cachep)
95 order++;
97 if (order != 0)
98 xen_destroy_contiguous_region((unsigned long)buf, order);
99 }
101 static int __init skbuff_init(void)
102 {
103 static char name[MAX_SKBUFF_ORDER + 1][20];
104 static char small_name[ARRAY_SIZE(skbuff_small)][20];
105 unsigned long size;
106 int i, order;
108 for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
109 size = skbuff_small[i].size;
110 sprintf(small_name[i], "xen-skb-%lu", size);
111 /*
112 * No ctor/dtor: objects do not span page boundaries, and they
113 * are only used on transmit path so no need for scrubbing.
114 */
115 skbuff_small[i].cachep = kmem_cache_create(
116 small_name[i], size, size, 0, NULL, NULL);
117 }
119 for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
120 size = PAGE_SIZE << order;
121 sprintf(name[order], "xen-skb-%lu", size);
122 skbuff_order_cachep[order] = kmem_cache_create(
123 name[order], size, size, 0, skbuff_ctor, skbuff_dtor);
124 }
126 skbuff_cachep = skbuff_order_cachep[0];
128 return 0;
129 }
/* Register as a core initcall so the caches are set up early in boot. */
core_initcall(skbuff_init);

EXPORT_SYMBOL(__dev_alloc_skb);
134 /*
135 * Local variables:
136 * c-file-style: "linux"
137 * indent-tabs-mode: t
138 * c-indent-level: 8
139 * c-basic-offset: 8
140 * tab-width: 8
141 * End:
142 */