ia64/linux-2.6.18-xen.hg

view drivers/net/ibmveth.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, causing temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
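
As a rough illustration of the behaviour described above, the sketch
below is a minimal, self-contained C program, not the real balloon
driver code; the names (try_increase_reservation, balloon_worker) are
hypothetical. It shows the loop keeping whatever pages were granted
and retrying later instead of recording a hard limit.

/*
 * Hypothetical userspace sketch of the retry behaviour described in
 * this changeset; not the actual balloon driver implementation.
 */
#include <stdio.h>

/* Stand-in for the hypercall: grants at most *avail of the request. */
static long try_increase_reservation(long nr_pages, long *avail)
{
        long granted = nr_pages < *avail ? nr_pages : *avail;

        *avail -= granted;
        return granted;                 /* may be 0 under host pressure */
}

static void balloon_worker(long *current_pages, long target, long *host_avail)
{
        while (*current_pages < target) {
                long want = target - *current_pages;
                long got = try_increase_reservation(want, host_avail);

                *current_pages += got;  /* keep a partial success */
                if (got < want) {
                        /* Host under pressure: the real driver would
                         * rearm a timer here and retry, not give up. */
                        printf("got %ld/%ld pages, retrying later\n",
                               got, want);
                        *host_avail += 64; /* e.g. another guest balloons down */
                }
        }
        printf("reached target of %ld pages\n", target);
}

int main(void)
{
        long current_pages = 1000, host_avail = 100;

        balloon_worker(&current_pages, 1256, &host_avail);
        return 0;
}

In the real driver the retries are driven by the same timer already
used when decreasing the reservation, as the description above says.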

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /**************************************************************************/
2 /* */
3 /* IBM eServer i/pSeries Virtual Ethernet Device Driver */
4 /* Copyright (C) 2003 IBM Corp. */
5 /* Originally written by Dave Larson (larson1@us.ibm.com) */
6 /* Maintained by Santiago Leon (santil@us.ibm.com) */
7 /* */
8 /* This program is free software; you can redistribute it and/or modify */
9 /* it under the terms of the GNU General Public License as published by */
10 /* the Free Software Foundation; either version 2 of the License, or */
11 /* (at your option) any later version. */
12 /* */
13 /* This program is distributed in the hope that it will be useful, */
14 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
15 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
16 /* GNU General Public License for more details. */
17 /* */
18 /* You should have received a copy of the GNU General Public License */
19 /* along with this program; if not, write to the Free Software */
20 /* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
21 /* USA */
22 /* */
23 /* This module contains the implementation of a virtual ethernet device */
24 /* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
25 /* option of the RS/6000 Platform Architecture to interface with virtual */
26 /* ethernet NICs that are presented to the partition by the hypervisor. */
27 /* */
28 /**************************************************************************/
29 /*
30 TODO:
31 - remove frag processing code - no longer needed
32 - add support for sysfs
33 - possibly remove procfs support
34 */
36 #include <linux/module.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/ioport.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/kernel.h>
42 #include <linux/netdevice.h>
43 #include <linux/etherdevice.h>
44 #include <linux/skbuff.h>
45 #include <linux/init.h>
46 #include <linux/delay.h>
47 #include <linux/mm.h>
48 #include <linux/ethtool.h>
49 #include <linux/proc_fs.h>
50 #include <asm/semaphore.h>
51 #include <asm/hvcall.h>
52 #include <asm/atomic.h>
53 #include <asm/iommu.h>
54 #include <asm/vio.h>
55 #include <asm/uaccess.h>
56 #include <linux/seq_file.h>
58 #include "ibmveth.h"
60 #undef DEBUG
62 #define ibmveth_printk(fmt, args...) \
63 printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
65 #define ibmveth_error_printk(fmt, args...) \
66 printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
68 #ifdef DEBUG
69 #define ibmveth_debug_printk_no_adapter(fmt, args...) \
70 printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
71 #define ibmveth_debug_printk(fmt, args...) \
72 printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
73 #define ibmveth_assert(expr) \
74 if(!(expr)) { \
75 printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
76 BUG(); \
77 }
78 #else
79 #define ibmveth_debug_printk_no_adapter(fmt, args...)
80 #define ibmveth_debug_printk(fmt, args...)
81 #define ibmveth_assert(expr)
82 #endif
84 static int ibmveth_open(struct net_device *dev);
85 static int ibmveth_close(struct net_device *dev);
86 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
87 static int ibmveth_poll(struct net_device *dev, int *budget);
88 static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
89 static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
90 static void ibmveth_set_multicast_list(struct net_device *dev);
91 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
92 static void ibmveth_proc_register_driver(void);
93 static void ibmveth_proc_unregister_driver(void);
94 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
95 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
96 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
97 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
98 static struct kobj_type ktype_veth_pool;
100 #ifdef CONFIG_PROC_FS
101 #define IBMVETH_PROC_DIR "net/ibmveth"
102 static struct proc_dir_entry *ibmveth_proc_dir;
103 #endif
105 static const char ibmveth_driver_name[] = "ibmveth";
106 static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
107 #define ibmveth_driver_version "1.03"
109 MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
110 MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
111 MODULE_LICENSE("GPL");
112 MODULE_VERSION(ibmveth_driver_version);
114 /* simple methods of getting data from the current rxq entry */
115 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
116 {
117 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
118 }
120 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
121 {
122 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
123 }
125 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
126 {
127 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
128 }
130 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
131 {
132 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
133 }
135 /* setup the initial settings for a buffer pool */
136 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
137 {
138 pool->size = pool_size;
139 pool->index = pool_index;
140 pool->buff_size = buff_size;
141 pool->threshold = pool_size / 2;
142 pool->active = pool_active;
143 }
145 /* allocate and set up a buffer pool - called during open */
146 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
147 {
148 int i;
150 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
152 if(!pool->free_map) {
153 return -1;
154 }
156 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
157 if(!pool->dma_addr) {
158 kfree(pool->free_map);
159 pool->free_map = NULL;
160 return -1;
161 }
163 pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
165 if(!pool->skbuff) {
166 kfree(pool->dma_addr);
167 pool->dma_addr = NULL;
169 kfree(pool->free_map);
170 pool->free_map = NULL;
171 return -1;
172 }
174 memset(pool->skbuff, 0, sizeof(void*) * pool->size);
175 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
177 for(i = 0; i < pool->size; ++i) {
178 pool->free_map[i] = i;
179 }
181 atomic_set(&pool->available, 0);
182 pool->producer_index = 0;
183 pool->consumer_index = 0;
185 return 0;
186 }
188 /* replenish the buffers for a pool. note that we don't need to
189 * skb_reserve these since they are used for incoming...
190 */
191 static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
192 {
193 u32 i;
194 u32 count = pool->size - atomic_read(&pool->available);
195 u32 buffers_added = 0;
197 mb();
199 for(i = 0; i < count; ++i) {
200 struct sk_buff *skb;
201 unsigned int free_index, index;
202 u64 correlator;
203 union ibmveth_buf_desc desc;
204 unsigned long lpar_rc;
205 dma_addr_t dma_addr;
207 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
209 if(!skb) {
210 ibmveth_debug_printk("replenish: unable to allocate skb\n");
211 adapter->replenish_no_mem++;
212 break;
213 }
215 free_index = pool->consumer_index++ % pool->size;
216 index = pool->free_map[free_index];
218 ibmveth_assert(index != IBM_VETH_INVALID_MAP);
219 ibmveth_assert(pool->skbuff[index] == NULL);
221 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
222 pool->buff_size, DMA_FROM_DEVICE);
224 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
225 pool->dma_addr[index] = dma_addr;
226 pool->skbuff[index] = skb;
228 correlator = ((u64)pool->index << 32) | index;
229 *(u64*)skb->data = correlator;
231 desc.desc = 0;
232 desc.fields.valid = 1;
233 desc.fields.length = pool->buff_size;
234 desc.fields.address = dma_addr;
236 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
238 if(lpar_rc != H_SUCCESS) {
239 pool->free_map[free_index] = index;
240 pool->skbuff[index] = NULL;
241 pool->consumer_index--;
242 dma_unmap_single(&adapter->vdev->dev,
243 pool->dma_addr[index], pool->buff_size,
244 DMA_FROM_DEVICE);
245 dev_kfree_skb_any(skb);
246 adapter->replenish_add_buff_failure++;
247 break;
248 } else {
249 buffers_added++;
250 adapter->replenish_add_buff_success++;
251 }
252 }
254 mb();
255 atomic_add(buffers_added, &(pool->available));
256 }
258 /* replenish routine */
259 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
260 {
261 int i;
263 adapter->replenish_task_cycles++;
265 for(i = 0; i < IbmVethNumBufferPools; i++)
266 if(adapter->rx_buff_pool[i].active)
267 ibmveth_replenish_buffer_pool(adapter,
268 &adapter->rx_buff_pool[i]);
270 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
271 }
273 /* empty and free a buffer pool - also used to do cleanup in error paths */
274 static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
275 {
276 int i;
278 kfree(pool->free_map);
279 pool->free_map = NULL;
281 if(pool->skbuff && pool->dma_addr) {
282 for(i = 0; i < pool->size; ++i) {
283 struct sk_buff *skb = pool->skbuff[i];
284 if(skb) {
285 dma_unmap_single(&adapter->vdev->dev,
286 pool->dma_addr[i],
287 pool->buff_size,
288 DMA_FROM_DEVICE);
289 dev_kfree_skb_any(skb);
290 pool->skbuff[i] = NULL;
291 }
292 }
293 }
295 if(pool->dma_addr) {
296 kfree(pool->dma_addr);
297 pool->dma_addr = NULL;
298 }
300 if(pool->skbuff) {
301 kfree(pool->skbuff);
302 pool->skbuff = NULL;
303 }
304 }
306 /* remove a buffer from a pool */
307 static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
308 {
309 unsigned int pool = correlator >> 32;
310 unsigned int index = correlator & 0xffffffffUL;
311 unsigned int free_index;
312 struct sk_buff *skb;
314 ibmveth_assert(pool < IbmVethNumBufferPools);
315 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
317 skb = adapter->rx_buff_pool[pool].skbuff[index];
319 ibmveth_assert(skb != NULL);
321 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
323 dma_unmap_single(&adapter->vdev->dev,
324 adapter->rx_buff_pool[pool].dma_addr[index],
325 adapter->rx_buff_pool[pool].buff_size,
326 DMA_FROM_DEVICE);
328 free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
329 adapter->rx_buff_pool[pool].free_map[free_index] = index;
331 mb();
333 atomic_dec(&(adapter->rx_buff_pool[pool].available));
334 }
336 /* get the current buffer on the rx queue */
337 static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
338 {
339 u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
340 unsigned int pool = correlator >> 32;
341 unsigned int index = correlator & 0xffffffffUL;
343 ibmveth_assert(pool < IbmVethNumBufferPools);
344 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
346 return adapter->rx_buff_pool[pool].skbuff[index];
347 }
349 /* recycle the current buffer on the rx queue */
350 static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
351 {
352 u32 q_index = adapter->rx_queue.index;
353 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
354 unsigned int pool = correlator >> 32;
355 unsigned int index = correlator & 0xffffffffUL;
356 union ibmveth_buf_desc desc;
357 unsigned long lpar_rc;
359 ibmveth_assert(pool < IbmVethNumBufferPools);
360 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
362 if(!adapter->rx_buff_pool[pool].active) {
363 ibmveth_rxq_harvest_buffer(adapter);
364 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
365 return;
366 }
368 desc.desc = 0;
369 desc.fields.valid = 1;
370 desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
371 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
373 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
375 if(lpar_rc != H_SUCCESS) {
376 ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
377 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
378 }
380 if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
381 adapter->rx_queue.index = 0;
382 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
383 }
384 }
386 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
387 {
388 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
390 if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
391 adapter->rx_queue.index = 0;
392 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
393 }
394 }
396 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
397 {
398 int i;
400 if(adapter->buffer_list_addr != NULL) {
401 if(!dma_mapping_error(adapter->buffer_list_dma)) {
402 dma_unmap_single(&adapter->vdev->dev,
403 adapter->buffer_list_dma, 4096,
404 DMA_BIDIRECTIONAL);
405 adapter->buffer_list_dma = DMA_ERROR_CODE;
406 }
407 free_page((unsigned long)adapter->buffer_list_addr);
408 adapter->buffer_list_addr = NULL;
409 }
411 if(adapter->filter_list_addr != NULL) {
412 if(!dma_mapping_error(adapter->filter_list_dma)) {
413 dma_unmap_single(&adapter->vdev->dev,
414 adapter->filter_list_dma, 4096,
415 DMA_BIDIRECTIONAL);
416 adapter->filter_list_dma = DMA_ERROR_CODE;
417 }
418 free_page((unsigned long)adapter->filter_list_addr);
419 adapter->filter_list_addr = NULL;
420 }
422 if(adapter->rx_queue.queue_addr != NULL) {
423 if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
424 dma_unmap_single(&adapter->vdev->dev,
425 adapter->rx_queue.queue_dma,
426 adapter->rx_queue.queue_len,
427 DMA_BIDIRECTIONAL);
428 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
429 }
430 kfree(adapter->rx_queue.queue_addr);
431 adapter->rx_queue.queue_addr = NULL;
432 }
434 for(i = 0; i<IbmVethNumBufferPools; i++)
435 if (adapter->rx_buff_pool[i].active)
436 ibmveth_free_buffer_pool(adapter,
437 &adapter->rx_buff_pool[i]);
438 }
440 static int ibmveth_open(struct net_device *netdev)
441 {
442 struct ibmveth_adapter *adapter = netdev->priv;
443 u64 mac_address = 0;
444 int rxq_entries = 1;
445 unsigned long lpar_rc;
446 int rc;
447 union ibmveth_buf_desc rxq_desc;
448 int i;
450 ibmveth_debug_printk("open starting\n");
452 for(i = 0; i<IbmVethNumBufferPools; i++)
453 rxq_entries += adapter->rx_buff_pool[i].size;
455 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
456 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
458 if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
459 ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
460 ibmveth_cleanup(adapter);
461 return -ENOMEM;
462 }
464 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
465 adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
467 if(!adapter->rx_queue.queue_addr) {
468 ibmveth_error_printk("unable to allocate rx queue pages\n");
469 ibmveth_cleanup(adapter);
470 return -ENOMEM;
471 }
473 adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
474 adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
475 adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
476 adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
477 adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
478 adapter->rx_queue.queue_addr,
479 adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
481 if((dma_mapping_error(adapter->buffer_list_dma) ) ||
482 (dma_mapping_error(adapter->filter_list_dma)) ||
483 (dma_mapping_error(adapter->rx_queue.queue_dma))) {
484 ibmveth_error_printk("unable to map filter or buffer list pages\n");
485 ibmveth_cleanup(adapter);
486 return -ENOMEM;
487 }
489 adapter->rx_queue.index = 0;
490 adapter->rx_queue.num_slots = rxq_entries;
491 adapter->rx_queue.toggle = 1;
493 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
494 mac_address = mac_address >> 16;
496 rxq_desc.desc = 0;
497 rxq_desc.fields.valid = 1;
498 rxq_desc.fields.length = adapter->rx_queue.queue_len;
499 rxq_desc.fields.address = adapter->rx_queue.queue_dma;
501 ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
502 ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
503 ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
506 lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
507 adapter->buffer_list_dma,
508 rxq_desc.desc,
509 adapter->filter_list_dma,
510 mac_address);
512 if(lpar_rc != H_SUCCESS) {
513 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
514 ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
515 adapter->buffer_list_dma,
516 adapter->filter_list_dma,
517 rxq_desc.desc,
518 mac_address);
519 ibmveth_cleanup(adapter);
520 return -ENONET;
521 }
523 for(i = 0; i<IbmVethNumBufferPools; i++) {
524 if(!adapter->rx_buff_pool[i].active)
525 continue;
526 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
527 ibmveth_error_printk("unable to alloc pool\n");
528 adapter->rx_buff_pool[i].active = 0;
529 ibmveth_cleanup(adapter);
530 return -ENOMEM ;
531 }
532 }
534 ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
535 if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
536 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
537 do {
538 rc = h_free_logical_lan(adapter->vdev->unit_address);
539 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
541 ibmveth_cleanup(adapter);
542 return rc;
543 }
545 ibmveth_debug_printk("initial replenish cycle\n");
546 ibmveth_interrupt(netdev->irq, netdev, NULL);
548 netif_start_queue(netdev);
550 ibmveth_debug_printk("open complete\n");
552 return 0;
553 }
555 static int ibmveth_close(struct net_device *netdev)
556 {
557 struct ibmveth_adapter *adapter = netdev->priv;
558 long lpar_rc;
560 ibmveth_debug_printk("close starting\n");
562 if (!adapter->pool_config)
563 netif_stop_queue(netdev);
565 free_irq(netdev->irq, netdev);
567 do {
568 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
569 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
571 if(lpar_rc != H_SUCCESS)
572 {
573 ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
574 lpar_rc);
575 }
577 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
579 ibmveth_cleanup(adapter);
581 ibmveth_debug_printk("close complete\n");
583 return 0;
584 }
586 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
587 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
588 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
589 cmd->speed = SPEED_1000;
590 cmd->duplex = DUPLEX_FULL;
591 cmd->port = PORT_FIBRE;
592 cmd->phy_address = 0;
593 cmd->transceiver = XCVR_INTERNAL;
594 cmd->autoneg = AUTONEG_ENABLE;
595 cmd->maxtxpkt = 0;
596 cmd->maxrxpkt = 1;
597 return 0;
598 }
600 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
601 strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
602 strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
603 }
605 static u32 netdev_get_link(struct net_device *dev) {
606 return 1;
607 }
609 static struct ethtool_ops netdev_ethtool_ops = {
610 .get_drvinfo = netdev_get_drvinfo,
611 .get_settings = netdev_get_settings,
612 .get_link = netdev_get_link,
613 .get_sg = ethtool_op_get_sg,
614 .get_tx_csum = ethtool_op_get_tx_csum,
615 };
617 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
618 {
619 return -EOPNOTSUPP;
620 }
622 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
624 static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
625 {
626 struct ibmveth_adapter *adapter = netdev->priv;
627 union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
628 unsigned long lpar_rc;
629 int nfrags = 0, curfrag;
630 unsigned long correlator;
631 unsigned long flags;
632 unsigned int retry_count;
633 unsigned int tx_dropped = 0;
634 unsigned int tx_bytes = 0;
635 unsigned int tx_packets = 0;
636 unsigned int tx_send_failed = 0;
637 unsigned int tx_map_failed = 0;
640 if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
641 tx_dropped++;
642 goto out;
643 }
645 memset(&desc, 0, sizeof(desc));
647 /* nfrags = number of frags after the initial fragment */
648 nfrags = skb_shinfo(skb)->nr_frags;
650 if(nfrags)
651 adapter->tx_multidesc_send++;
653 /* map the initial fragment */
654 desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
655 desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
656 desc[0].fields.length, DMA_TO_DEVICE);
657 desc[0].fields.valid = 1;
659 if(dma_mapping_error(desc[0].fields.address)) {
660 ibmveth_error_printk("tx: unable to map initial fragment\n");
661 tx_map_failed++;
662 tx_dropped++;
663 goto out;
664 }
666 curfrag = nfrags;
668 /* map fragments past the initial portion if there are any */
669 while(curfrag--) {
670 skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
671 desc[curfrag+1].fields.address
672 = dma_map_single(&adapter->vdev->dev,
673 page_address(frag->page) + frag->page_offset,
674 frag->size, DMA_TO_DEVICE);
675 desc[curfrag+1].fields.length = frag->size;
676 desc[curfrag+1].fields.valid = 1;
678 if(dma_mapping_error(desc[curfrag+1].fields.address)) {
679 ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
680 tx_map_failed++;
681 tx_dropped++;
682 /* Free all the mappings we just created */
683 while(curfrag < nfrags) {
684 dma_unmap_single(&adapter->vdev->dev,
685 desc[curfrag+1].fields.address,
686 desc[curfrag+1].fields.length,
687 DMA_TO_DEVICE);
688 curfrag++;
689 }
690 goto out;
691 }
692 }
694 /* send the frame. Arbitrarily set retrycount to 1024 */
695 correlator = 0;
696 retry_count = 1024;
697 do {
698 lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
699 desc[0].desc,
700 desc[1].desc,
701 desc[2].desc,
702 desc[3].desc,
703 desc[4].desc,
704 desc[5].desc,
705 correlator);
706 } while ((lpar_rc == H_BUSY) && (retry_count--));
708 if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
709 int i;
710 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
711 for(i = 0; i < 6; i++) {
712 ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
713 desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
714 }
715 tx_send_failed++;
716 tx_dropped++;
717 } else {
718 tx_packets++;
719 tx_bytes += skb->len;
720 netdev->trans_start = jiffies;
721 }
723 do {
724 dma_unmap_single(&adapter->vdev->dev,
725 desc[nfrags].fields.address,
726 desc[nfrags].fields.length, DMA_TO_DEVICE);
727 } while(--nfrags >= 0);
729 out: spin_lock_irqsave(&adapter->stats_lock, flags);
730 adapter->stats.tx_dropped += tx_dropped;
731 adapter->stats.tx_bytes += tx_bytes;
732 adapter->stats.tx_packets += tx_packets;
733 adapter->tx_send_failed += tx_send_failed;
734 adapter->tx_map_failed += tx_map_failed;
735 spin_unlock_irqrestore(&adapter->stats_lock, flags);
737 dev_kfree_skb(skb);
738 return 0;
739 }
741 static int ibmveth_poll(struct net_device *netdev, int *budget)
742 {
743 struct ibmveth_adapter *adapter = netdev->priv;
744 int max_frames_to_process = netdev->quota;
745 int frames_processed = 0;
746 int more_work = 1;
747 unsigned long lpar_rc;
749 restart_poll:
750 do {
751 struct net_device *netdev = adapter->netdev;
753 if(ibmveth_rxq_pending_buffer(adapter)) {
754 struct sk_buff *skb;
756 rmb();
758 if(!ibmveth_rxq_buffer_valid(adapter)) {
759 wmb(); /* suggested by larson1 */
760 adapter->rx_invalid_buffer++;
761 ibmveth_debug_printk("recycling invalid buffer\n");
762 ibmveth_rxq_recycle_buffer(adapter);
763 } else {
764 int length = ibmveth_rxq_frame_length(adapter);
765 int offset = ibmveth_rxq_frame_offset(adapter);
766 skb = ibmveth_rxq_get_buffer(adapter);
768 ibmveth_rxq_harvest_buffer(adapter);
770 skb_reserve(skb, offset);
771 skb_put(skb, length);
772 skb->dev = netdev;
773 skb->protocol = eth_type_trans(skb, netdev);
775 netif_receive_skb(skb); /* send it up */
777 adapter->stats.rx_packets++;
778 adapter->stats.rx_bytes += length;
779 frames_processed++;
780 netdev->last_rx = jiffies;
781 }
782 } else {
783 more_work = 0;
784 }
785 } while(more_work && (frames_processed < max_frames_to_process));
787 ibmveth_replenish_task(adapter);
789 if(more_work) {
790 /* more work to do - return that we are not done yet */
791 netdev->quota -= frames_processed;
792 *budget -= frames_processed;
793 return 1;
794 }
796 /* we think we are done - reenable interrupts, then check once more to make sure we are done */
797 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
799 ibmveth_assert(lpar_rc == H_SUCCESS);
801 netif_rx_complete(netdev);
803 if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
804 {
805 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
806 ibmveth_assert(lpar_rc == H_SUCCESS);
807 more_work = 1;
808 goto restart_poll;
809 }
811 netdev->quota -= frames_processed;
812 *budget -= frames_processed;
814 /* we really are done */
815 return 0;
816 }
818 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
819 {
820 struct net_device *netdev = dev_instance;
821 struct ibmveth_adapter *adapter = netdev->priv;
822 unsigned long lpar_rc;
824 if(netif_rx_schedule_prep(netdev)) {
825 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
826 ibmveth_assert(lpar_rc == H_SUCCESS);
827 __netif_rx_schedule(netdev);
828 }
829 return IRQ_HANDLED;
830 }
832 static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
833 {
834 struct ibmveth_adapter *adapter = dev->priv;
835 return &adapter->stats;
836 }
838 static void ibmveth_set_multicast_list(struct net_device *netdev)
839 {
840 struct ibmveth_adapter *adapter = netdev->priv;
841 unsigned long lpar_rc;
843 if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
844 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
845 IbmVethMcastEnableRecv |
846 IbmVethMcastDisableFiltering,
847 0);
848 if(lpar_rc != H_SUCCESS) {
849 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
850 }
851 } else {
852 struct dev_mc_list *mclist = netdev->mc_list;
853 int i;
854 /* clear the filter table & disable filtering */
855 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
856 IbmVethMcastEnableRecv |
857 IbmVethMcastDisableFiltering |
858 IbmVethMcastClearFilterTable,
859 0);
860 if(lpar_rc != H_SUCCESS) {
861 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
862 }
863 /* add the addresses to the filter table */
864 for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
865 // add the multicast address to the filter table
866 unsigned long mcast_addr = 0;
867 memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
868 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
869 IbmVethMcastAddFilter,
870 mcast_addr);
871 if(lpar_rc != H_SUCCESS) {
872 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
873 }
874 }
876 /* re-enable filtering */
877 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
878 IbmVethMcastEnableFiltering,
879 0);
880 if(lpar_rc != H_SUCCESS) {
881 ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
882 }
883 }
884 }
886 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
887 {
888 struct ibmveth_adapter *adapter = dev->priv;
889 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
890 int i;
892 if (new_mtu < IBMVETH_MAX_MTU)
893 return -EINVAL;
895 /* Look for an active buffer pool that can hold the new MTU */
896 for(i = 0; i<IbmVethNumBufferPools; i++) {
897 if (!adapter->rx_buff_pool[i].active)
898 continue;
899 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
900 dev->mtu = new_mtu;
901 return 0;
902 }
903 }
904 return -EINVAL;
905 }
907 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
908 {
909 int rc, i;
910 struct net_device *netdev;
911 struct ibmveth_adapter *adapter = NULL;
913 unsigned char *mac_addr_p;
914 unsigned int *mcastFilterSize_p;
917 ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
918 dev->unit_address);
920 mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
921 if(!mac_addr_p) {
922 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
923 "attribute\n", __FILE__, __LINE__);
924 return 0;
925 }
927 mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
928 if(!mcastFilterSize_p) {
929 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
930 "VETH_MCAST_FILTER_SIZE attribute\n",
931 __FILE__, __LINE__);
932 return 0;
933 }
935 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
937 if(!netdev)
938 return -ENOMEM;
940 SET_MODULE_OWNER(netdev);
942 adapter = netdev->priv;
943 memset(adapter, 0, sizeof(adapter));
944 dev->dev.driver_data = netdev;
946 adapter->vdev = dev;
947 adapter->netdev = netdev;
948 adapter->mcastFilterSize= *mcastFilterSize_p;
949 adapter->pool_config = 0;
951 /* Some older boxes running PHYP non-natively have an OF that
952 returns an 8-byte local-mac-address field (and the first
953 2 bytes have to be ignored) while newer boxes' OF return
954 a 6-byte field. Note that IEEE 1275 specifies that
955 local-mac-address must be a 6-byte field.
956 The RPA doc specifies that the first byte must be 10b, so
957 we'll just look for it to solve this 8 vs. 6 byte field issue */
959 if ((*mac_addr_p & 0x3) != 0x02)
960 mac_addr_p += 2;
962 adapter->mac_addr = 0;
963 memcpy(&adapter->mac_addr, mac_addr_p, 6);
965 adapter->liobn = dev->iommu_table->it_index;
967 netdev->irq = dev->irq;
968 netdev->open = ibmveth_open;
969 netdev->poll = ibmveth_poll;
970 netdev->weight = 16;
971 netdev->stop = ibmveth_close;
972 netdev->hard_start_xmit = ibmveth_start_xmit;
973 netdev->get_stats = ibmveth_get_stats;
974 netdev->set_multicast_list = ibmveth_set_multicast_list;
975 netdev->do_ioctl = ibmveth_ioctl;
976 netdev->ethtool_ops = &netdev_ethtool_ops;
977 netdev->change_mtu = ibmveth_change_mtu;
978 SET_NETDEV_DEV(netdev, &dev->dev);
979 netdev->features |= NETIF_F_LLTX;
980 spin_lock_init(&adapter->stats_lock);
982 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
984 for(i = 0; i<IbmVethNumBufferPools; i++) {
985 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
986 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
987 pool_count[i], pool_size[i],
988 pool_active[i]);
989 kobj->parent = &dev->dev.kobj;
990 sprintf(kobj->name, "pool%d", i);
991 kobj->ktype = &ktype_veth_pool;
992 kobject_register(kobj);
993 }
995 ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
997 adapter->buffer_list_dma = DMA_ERROR_CODE;
998 adapter->filter_list_dma = DMA_ERROR_CODE;
999 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1001 ibmveth_debug_printk("registering netdev...\n");
1003 rc = register_netdev(netdev);
1005 if(rc) {
1006 ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
1007 free_netdev(netdev);
1008 return rc;
1009 }
1011 ibmveth_debug_printk("registered\n");
1013 ibmveth_proc_register_adapter(adapter);
1015 return 0;
1016 }
1018 static int __devexit ibmveth_remove(struct vio_dev *dev)
1019 {
1020 struct net_device *netdev = dev->dev.driver_data;
1021 struct ibmveth_adapter *adapter = netdev->priv;
1022 int i;
1024 for(i = 0; i<IbmVethNumBufferPools; i++)
1025 kobject_unregister(&adapter->rx_buff_pool[i].kobj);
1027 unregister_netdev(netdev);
1029 ibmveth_proc_unregister_adapter(adapter);
1031 free_netdev(netdev);
1032 return 0;
1033 }
1035 #ifdef CONFIG_PROC_FS
1036 static void ibmveth_proc_register_driver(void)
1037 {
1038 ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
1039 if (ibmveth_proc_dir) {
1040 SET_MODULE_OWNER(ibmveth_proc_dir);
1041 }
1042 }
1044 static void ibmveth_proc_unregister_driver(void)
1045 {
1046 remove_proc_entry(IBMVETH_PROC_DIR, NULL);
1047 }
1049 static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
1050 {
1051 if (*pos == 0) {
1052 return (void *)1;
1053 } else {
1054 return NULL;
1055 }
1056 }
1058 static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1059 {
1060 ++*pos;
1061 return NULL;
1062 }
1064 static void ibmveth_seq_stop(struct seq_file *seq, void *v)
1065 {
1066 }
1068 static int ibmveth_seq_show(struct seq_file *seq, void *v)
1069 {
1070 struct ibmveth_adapter *adapter = seq->private;
1071 char *current_mac = ((char*) &adapter->netdev->dev_addr);
1072 char *firmware_mac = ((char*) &adapter->mac_addr) ;
1074 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
1076 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
1077 seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
1078 seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
1079 current_mac[0], current_mac[1], current_mac[2],
1080 current_mac[3], current_mac[4], current_mac[5]);
1081 seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
1082 firmware_mac[0], firmware_mac[1], firmware_mac[2],
1083 firmware_mac[3], firmware_mac[4], firmware_mac[5]);
1085 seq_printf(seq, "\nAdapter Statistics:\n");
1086 seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
1087 seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
1088 seq_printf(seq, " skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
1089 seq_printf(seq, " vio_map_single failures: %ld\n", adapter->tx_map_failed);
1090 seq_printf(seq, " send failures: %ld\n", adapter->tx_send_failed);
1091 seq_printf(seq, " RX: replenish task cycles: %ld\n", adapter->replenish_task_cycles);
1092 seq_printf(seq, " alloc_skb_failures: %ld\n", adapter->replenish_no_mem);
1093 seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure);
1094 seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer);
1095 seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer);
1097 return 0;
1098 }
1099 static struct seq_operations ibmveth_seq_ops = {
1100 .start = ibmveth_seq_start,
1101 .next = ibmveth_seq_next,
1102 .stop = ibmveth_seq_stop,
1103 .show = ibmveth_seq_show,
1104 };
1106 static int ibmveth_proc_open(struct inode *inode, struct file *file)
1107 {
1108 struct seq_file *seq;
1109 struct proc_dir_entry *proc;
1110 int rc;
1112 rc = seq_open(file, &ibmveth_seq_ops);
1113 if (!rc) {
1114 /* recover the pointer buried in proc_dir_entry data */
1115 seq = file->private_data;
1116 proc = PDE(inode);
1117 seq->private = proc->data;
1118 }
1119 return rc;
1120 }
1122 static struct file_operations ibmveth_proc_fops = {
1123 .owner = THIS_MODULE,
1124 .open = ibmveth_proc_open,
1125 .read = seq_read,
1126 .llseek = seq_lseek,
1127 .release = seq_release,
1128 };
1130 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1131 {
1132 struct proc_dir_entry *entry;
1133 if (ibmveth_proc_dir) {
1134 entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
1135 if (!entry) {
1136 ibmveth_error_printk("Cannot create adapter proc entry");
1137 } else {
1138 entry->data = (void *) adapter;
1139 entry->proc_fops = &ibmveth_proc_fops;
1140 SET_MODULE_OWNER(entry);
1141 }
1142 }
1143 return;
1144 }
1146 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1147 {
1148 if (ibmveth_proc_dir) {
1149 remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
1150 }
1151 }
1153 #else /* CONFIG_PROC_FS */
1154 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1155 {
1156 }
1158 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1159 {
1160 }
1161 static void ibmveth_proc_register_driver(void)
1162 {
1163 }
1165 static void ibmveth_proc_unregister_driver(void)
1166 {
1167 }
1168 #endif /* CONFIG_PROC_FS */
1170 static struct attribute veth_active_attr;
1171 static struct attribute veth_num_attr;
1172 static struct attribute veth_size_attr;
1174 static ssize_t veth_pool_show(struct kobject * kobj,
1175 struct attribute * attr, char * buf)
1176 {
1177 struct ibmveth_buff_pool *pool = container_of(kobj,
1178 struct ibmveth_buff_pool,
1179 kobj);
1181 if (attr == &veth_active_attr)
1182 return sprintf(buf, "%d\n", pool->active);
1183 else if (attr == &veth_num_attr)
1184 return sprintf(buf, "%d\n", pool->size);
1185 else if (attr == &veth_size_attr)
1186 return sprintf(buf, "%d\n", pool->buff_size);
1187 return 0;
1188 }
1190 static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
1191 const char * buf, size_t count)
1192 {
1193 struct ibmveth_buff_pool *pool = container_of(kobj,
1194 struct ibmveth_buff_pool,
1195 kobj);
1196 struct net_device *netdev =
1197 container_of(kobj->parent, struct device, kobj)->driver_data;
1198 struct ibmveth_adapter *adapter = netdev->priv;
1199 long value = simple_strtol(buf, NULL, 10);
1200 long rc;
1202 if (attr == &veth_active_attr) {
1203 if (value && !pool->active) {
1204 if(ibmveth_alloc_buffer_pool(pool)) {
1205 ibmveth_error_printk("unable to alloc pool\n");
1206 return -ENOMEM;
1207 }
1208 pool->active = 1;
1209 adapter->pool_config = 1;
1210 ibmveth_close(netdev);
1211 adapter->pool_config = 0;
1212 if ((rc = ibmveth_open(netdev)))
1213 return rc;
1214 } else if (!value && pool->active) {
1215 int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1216 int i;
1217 /* Make sure there is a buffer pool with buffers that
1218 can hold a packet of the size of the MTU */
1219 for(i = 0; i<IbmVethNumBufferPools; i++) {
1220 if (pool == &adapter->rx_buff_pool[i])
1221 continue;
1222 if (!adapter->rx_buff_pool[i].active)
1223 continue;
1224 if (mtu < adapter->rx_buff_pool[i].buff_size) {
1225 pool->active = 0;
1226 h_free_logical_lan_buffer(adapter->
1227 vdev->
1228 unit_address,
1229 pool->
1230 buff_size);
1231 }
1232 }
1233 if (pool->active) {
1234 ibmveth_error_printk("no active pool >= MTU\n");
1235 return -EPERM;
1236 }
1237 }
1238 } else if (attr == &veth_num_attr) {
1239 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
1240 return -EINVAL;
1241 else {
1242 adapter->pool_config = 1;
1243 ibmveth_close(netdev);
1244 adapter->pool_config = 0;
1245 pool->size = value;
1246 if ((rc = ibmveth_open(netdev)))
1247 return rc;
1248 }
1249 } else if (attr == &veth_size_attr) {
1250 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
1251 return -EINVAL;
1252 else {
1253 adapter->pool_config = 1;
1254 ibmveth_close(netdev);
1255 adapter->pool_config = 0;
1256 pool->buff_size = value;
1257 if ((rc = ibmveth_open(netdev)))
1258 return rc;
1259 }
1260 }
1262 /* kick the interrupt handler to allocate/deallocate pools */
1263 ibmveth_interrupt(netdev->irq, netdev, NULL);
1264 return count;
1265 }
1268 #define ATTR(_name, _mode) \
1269 struct attribute veth_##_name##_attr = { \
1270 .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
1271 };
1273 static ATTR(active, 0644);
1274 static ATTR(num, 0644);
1275 static ATTR(size, 0644);
1277 static struct attribute * veth_pool_attrs[] = {
1278 &veth_active_attr,
1279 &veth_num_attr,
1280 &veth_size_attr,
1281 NULL,
1282 };
1284 static struct sysfs_ops veth_pool_ops = {
1285 .show = veth_pool_show,
1286 .store = veth_pool_store,
1287 };
1289 static struct kobj_type ktype_veth_pool = {
1290 .release = NULL,
1291 .sysfs_ops = &veth_pool_ops,
1292 .default_attrs = veth_pool_attrs,
1293 };
1296 static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1297 { "network", "IBM,l-lan"},
1298 { "", "" }
1299 };
1300 MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1302 static struct vio_driver ibmveth_driver = {
1303 .id_table = ibmveth_device_table,
1304 .probe = ibmveth_probe,
1305 .remove = ibmveth_remove,
1306 .driver = {
1307 .name = ibmveth_driver_name,
1308 .owner = THIS_MODULE,
1309 }
1310 };
1312 static int __init ibmveth_module_init(void)
1313 {
1314 ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);
1316 ibmveth_proc_register_driver();
1318 return vio_register_driver(&ibmveth_driver);
1319 }
1321 static void __exit ibmveth_module_exit(void)
1322 {
1323 vio_unregister_driver(&ibmveth_driver);
1324 ibmveth_proc_unregister_driver();
1325 }
1327 module_init(ibmveth_module_init);
1328 module_exit(ibmveth_module_exit);