
linux-2.6-xen-sparse/drivers/xen/core/skbuff.c @ 11690:38f9bd7a4ce6

[NET] Make MAX_SKBUFF_ORDER page-size independent.

Original patch from Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Keir Fraser <keir@xensource.com>

author	kfraser@localhost.localdomain
date	Tue Oct 03 11:39:22 2006 +0100
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/hypervisor.h>
/* Referenced in netback.c. */
/*static*/ kmem_cache_t *skbuff_cachep;
EXPORT_SYMBOL(skbuff_cachep);
/* Allow up to 64kB or page-sized packets (whichever is greater). */
#if PAGE_SHIFT < 16
#define MAX_SKBUFF_ORDER (16 - PAGE_SHIFT)
#else
#define MAX_SKBUFF_ORDER 0
#endif
static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
static struct {
	int size;
	kmem_cache_t *cachep;
} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
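/*
 * Bucket sizes must be listed in ascending order: the lookup in
 * __alloc_skb() below scans linearly for the first bucket that fits.
 */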
struct sk_buff *__alloc_skb(unsigned int length, gfp_t gfp_mask,
			    int fclone)
{
	int order, i;
	kmem_cache_t *cachep;

	length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);

	if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
		for (i = 0; skbuff_small[i].size < length; i++)
			continue;
		cachep = skbuff_small[i].cachep;
	} else {
		order = get_order(length);
		if (order > MAX_SKBUFF_ORDER) {
			printk(KERN_ALERT "Attempt to allocate order %d "
			       "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
			return NULL;
		}
		cachep = skbuff_order_cachep[order];
	}
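	/*
	 * The cache object was sized with the skb_shared_info footer
	 * included; alloc_skb_from_cache() accounts for that footer
	 * itself, so strip it again before passing the data length on.
	 */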
	length -= sizeof(struct skb_shared_info);

	return alloc_skb_from_cache(cachep, length, gfp_mask, fclone);
}
struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;
	int order;
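	/*
	 * Add 16 bytes up front so that the skb_reserve(skb, 16) below
	 * still leaves the caller the full `length' bytes of packet room.
	 */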
	length = SKB_DATA_ALIGN(length + 16);
	order = get_order(length + sizeof(struct skb_shared_info));
	if (order > MAX_SKBUFF_ORDER) {
		printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
		       "Increase MAX_SKBUFF_ORDER.\n", order);
		return NULL;
	}

	skb = alloc_skb_from_cache(
		skbuff_order_cachep[order], length, gfp_mask, 0);
	if (skb != NULL)
		skb_reserve(skb, 16);

	return skb;
}
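/*
 * Illustrative only, not part of the original changeset: drivers normally
 * reach __dev_alloc_skb() via the standard dev_alloc_skb() wrapper, which
 * supplies GFP_ATOMIC:
 *
 *	struct sk_buff *skb = dev_alloc_skb(ETH_FRAME_LEN);
 *
 * A NULL return covers both allocation failure and the over-sized-order
 * case rejected above.
 */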
static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
	int order = 0;

	while (skbuff_order_cachep[order] != cachep)
		order++;

	/* Do our best to allocate contiguous memory but fall back to IOMMU. */
	if (order != 0)
		(void)xen_create_contiguous_region(
			(unsigned long)buf, order, 0);
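	/*
	 * Scrub (zero) the new pages so that no stale data from this
	 * domain can leak to another once the buffer is used for
	 * network I/O.
	 */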
	scrub_pages(buf, 1 << order);
}
static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
	int order = 0;

	while (skbuff_order_cachep[order] != cachep)
		order++;
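	/* Undo the machine-contiguous exchange made in skbuff_ctor(). */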
	if (order != 0)
		xen_destroy_contiguous_region((unsigned long)buf, order);
}
static int __init skbuff_init(void)
{
	static char name[MAX_SKBUFF_ORDER + 1][20];
	static char small_name[ARRAY_SIZE(skbuff_small)][20];
	unsigned long size;
	int i, order;

	for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
		size = skbuff_small[i].size;
		sprintf(small_name[i], "xen-skb-%lu", size);
		/*
		 * No ctor/dtor: objects do not span page boundaries, and
		 * they are only used on transmit path so no need for
		 * scrubbing.
		 */
		skbuff_small[i].cachep = kmem_cache_create(
			small_name[i], size, size, 0, NULL, NULL);
	}
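	/*
	 * Only the initial domain (which hosts the physical network
	 * drivers and netback) needs machine-contiguous, scrubbed
	 * buffers; everywhere else plain slab pages are sufficient.
	 */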
	for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
		size = PAGE_SIZE << order;
		sprintf(name[order], "xen-skb-%lu", size);
		if (is_running_on_xen() && is_initial_xendomain())
			skbuff_order_cachep[order] = kmem_cache_create(
				name[order], size, size, 0,
				skbuff_ctor, skbuff_dtor);
		else
			skbuff_order_cachep[order] = kmem_cache_create(
				name[order], size, size, 0, NULL, NULL);
	}

	skbuff_cachep = skbuff_order_cachep[0];

	return 0;
}
core_initcall(skbuff_init);

EXPORT_SYMBOL(__dev_alloc_skb);
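/*
 * For reference, not in the original file: with 4kB pages the init code
 * above registers caches named xen-skb-4096 through xen-skb-65536, plus
 * xen-skb-512 and xen-skb-2048, all visible in /proc/slabinfo.
 */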