ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/core/skbuff.c @ 10411:4f0bc5744557

[LINUX] Network buffers do not need to be multi-page contiguous for
unprivileged domains, which can in any case fall back to the swiotlb.
On unprivileged Xen/IA64 domains the contiguity requirement caused problems.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author   kaf24@firebug.cl.cam.ac.uk
date     Thu Jun 15 13:19:04 2006 +0100
parents  48c0f5489d44
children d8338b28bcd6
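
The effect of the change is visible in skbuff_init() below: the
ctor/dtor pair that makes buffers machine-contiguous is attached to the
order-N caches only when the domain is privileged. A minimal sketch of
that decision, using the names from this file (cachep stands in for the
skbuff_order_cachep[] slot being filled):

    if (is_running_on_xen() && (xen_start_info->flags & SIF_PRIVILEGED))
            /* Driver domain: make multi-page buffers machine-contiguous. */
            cachep = kmem_cache_create(name, size, size, 0,
                                       skbuff_ctor, skbuff_dtor);
    else
            /* Unprivileged domain: plain cache; the swiotlb covers DMA. */
            cachep = kmem_cache_create(name, size, size, 0, NULL, NULL);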

#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/hypervisor.h>

/* Referenced in netback.c. */
/*static*/ kmem_cache_t *skbuff_cachep;
EXPORT_SYMBOL(skbuff_cachep);
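
/*
 * Two families of caches back skb data areas: a pair of small caches
 * (512 bytes and 2048 bytes) for buffers that fit within a page, and
 * one cache per page order from 0 to MAX_SKBUFF_ORDER for anything
 * larger.
 */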
#define MAX_SKBUFF_ORDER 4
static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];

static struct {
        int size;
        kmem_cache_t *cachep;
} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
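
/*
 * Replacement for the generic __alloc_skb(): the data area is carved
 * from one of the caches above, via alloc_skb_from_cache(), instead of
 * coming from kmalloc().
 */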
struct sk_buff *__alloc_skb(unsigned int length, gfp_t gfp_mask,
                            int fclone)
{
        int order, i;
        kmem_cache_t *cachep;

        length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);

        if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
                for (i = 0; skbuff_small[i].size < length; i++)
                        continue;
                cachep = skbuff_small[i].cachep;
        } else {
                order = get_order(length);
                if (order > MAX_SKBUFF_ORDER) {
                        printk(KERN_ALERT "Attempt to allocate order %d "
                               "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
                        return NULL;
                }
                cachep = skbuff_order_cachep[order];
        }

        length -= sizeof(struct skb_shared_info);

        return alloc_skb_from_cache(cachep, length, gfp_mask, fclone);
}
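
/*
 * Replacement for the generic __dev_alloc_skb(): always allocates from
 * an order-N cache, and reserves the same 16 bytes of headroom as the
 * generic helper does.
 */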
struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask)
{
        struct sk_buff *skb;
        int order;

        length = SKB_DATA_ALIGN(length + 16);
        order = get_order(length + sizeof(struct skb_shared_info));
        if (order > MAX_SKBUFF_ORDER) {
                printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
                       "Increase MAX_SKBUFF_ORDER.\n", order);
                return NULL;
        }

        skb = alloc_skb_from_cache(
                skbuff_order_cachep[order], length, gfp_mask, 0);
        if (skb != NULL)
                skb_reserve(skb, 16);

        return skb;
}
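
/*
 * Constructor for the order-N caches. For multi-page objects, ask Xen
 * to exchange the backing frames for a machine-contiguous region so
 * the buffer can be handed to hardware for DMA; the pages are then
 * scrubbed (cleared) before first use.
 */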
static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
        int order = 0;

        while (skbuff_order_cachep[order] != cachep)
                order++;

        /* Do our best to allocate contiguous memory but fall back to IOMMU. */
        if (order != 0)
                (void)xen_create_contiguous_region(
                        (unsigned long)buf, order, 0);

        scrub_pages(buf, 1 << order);
}

static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
        int order = 0;

        while (skbuff_order_cachep[order] != cachep)
                order++;

        if (order != 0)
                xen_destroy_contiguous_region((unsigned long)buf, order);
}
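
/*
 * Boot-time setup: create the two small caches, then one cache per
 * page order up to MAX_SKBUFF_ORDER, and point skbuff_cachep (used by
 * netback) at the order-0 cache.
 */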
static int __init skbuff_init(void)
{
        static char name[MAX_SKBUFF_ORDER + 1][20];
        static char small_name[ARRAY_SIZE(skbuff_small)][20];
        unsigned long size;
        int i, order;

        for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
                size = skbuff_small[i].size;
                sprintf(small_name[i], "xen-skb-%lu", size);
                /*
                 * No ctor/dtor: objects do not span page boundaries, and
                 * they are only used on the transmit path, so there is no
                 * need for scrubbing.
                 */
                skbuff_small[i].cachep = kmem_cache_create(
                        small_name[i], size, size, 0, NULL, NULL);
        }
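
        /*
         * Attach the contiguity ctor/dtor only in a privileged domain
         * (SIF_PRIVILEGED set in the start-info flags). An unprivileged
         * domain does not need machine-contiguous buffers and can fall
         * back to the swiotlb in any case.
         */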
        for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
                size = PAGE_SIZE << order;
                sprintf(name[order], "xen-skb-%lu", size);
                if (is_running_on_xen() &&
                    (xen_start_info->flags & SIF_PRIVILEGED))
                        skbuff_order_cachep[order] = kmem_cache_create(
                                name[order], size, size, 0,
                                skbuff_ctor, skbuff_dtor);
                else
                        skbuff_order_cachep[order] = kmem_cache_create(
                                name[order], size, size, 0, NULL, NULL);
        }

        skbuff_cachep = skbuff_order_cachep[0];

        return 0;
}
core_initcall(skbuff_init);

EXPORT_SYMBOL(__dev_alloc_skb);