ia64/linux-2.6.18-xen.hg

view arch/alpha/kernel/core_marvel.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /*
2 * linux/arch/alpha/kernel/core_marvel.c
3 *
4 * Code common to all Marvel based systems.
5 */
7 #define __EXTERN_INLINE inline
8 #include <asm/io.h>
9 #include <asm/core_marvel.h>
10 #undef __EXTERN_INLINE
12 #include <linux/types.h>
13 #include <linux/pci.h>
14 #include <linux/sched.h>
15 #include <linux/init.h>
16 #include <linux/vmalloc.h>
17 #include <linux/mc146818rtc.h>
18 #include <linux/rtc.h>
19 #include <linux/module.h>
20 #include <linux/bootmem.h>
22 #include <asm/ptrace.h>
23 #include <asm/smp.h>
24 #include <asm/gct.h>
25 #include <asm/pgalloc.h>
26 #include <asm/tlbflush.h>
27 #include <asm/rtc.h>
29 #include "proto.h"
30 #include "pci_impl.h"
/*
 * Debug helpers
 */
#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif


/*
 * Private data
 */
/* Head of the singly-linked list of discovered IO7s, kept sorted by PE. */
static struct io7 *io7_head = NULL;


/*
 * Helper functions
 */
/*
 * Read one EV7 CSR at (pe, offset) through its kernel mapping.
 * The mb() pair orders this MMIO read against surrounding accesses.
 */
static unsigned long __attribute__ ((unused))
read_ev7_csr(int pe, unsigned long offset)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
	unsigned long q;

	mb();
	q = ev7csr->csr;
	mb();

	return q;
}
/*
 * Write one EV7 CSR at (pe, offset).  The mb() pair orders the MMIO
 * store against surrounding accesses.
 */
static void __attribute__ ((unused))
write_ev7_csr(int pe, unsigned long offset, unsigned long q)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);

	mb();
	ev7csr->csr = q;
	mb();
}
77 static char * __init
78 mk_resource_name(int pe, int port, char *str)
79 {
80 char tmp[80];
81 char *name;
83 sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
84 name = alloc_bootmem(strlen(tmp) + 1);
85 strcpy(name, tmp);
87 return name;
88 }
90 inline struct io7 *
91 marvel_next_io7(struct io7 *prev)
92 {
93 return (prev ? prev->next : io7_head);
94 }
96 struct io7 *
97 marvel_find_io7(int pe)
98 {
99 struct io7 *io7;
101 for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next)
102 continue;
104 return io7;
105 }
/*
 * Allocate (from bootmem) and initialize a struct io7 for processor
 * element 'pe', then insert it into the global list in pe-sorted order.
 *
 * Returns NULL if an IO7 for this pe already exists (either detected
 * up front, or discovered mid-insertion).  Note the bootmem allocation
 * is not reclaimed on the duplicate path; bootmem cannot be freed.
 */
static struct io7 * __init
alloc_io7(unsigned int pe)
{
	struct io7 *io7;
	struct io7 *insp;
	int h;

	if (marvel_find_io7(pe)) {
		printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe);
		return NULL;
	}

	/* alloc_bootmem returns zeroed memory, so io7->next starts NULL. */
	io7 = alloc_bootmem(sizeof(*io7));
	io7->pe = pe;
	spin_lock_init(&io7->irq_lock);

	/* All four ports point back at their IO7 and start disabled. */
	for (h = 0; h < 4; h++) {
		io7->ports[h].io7 = io7;
		io7->ports[h].port = h;
		io7->ports[h].enabled = 0;	/* default to disabled */
	}

	/*
	 * Insert in pe sorted order.
	 */
	if (NULL == io7_head)			/* empty list */
		io7_head = io7;
	else if (io7_head->pe > io7->pe) {	/* insert at head */
		io7->next = io7_head;
		io7_head = io7;
	} else {				/* insert at position */
		for (insp = io7_head; insp; insp = insp->next) {
			if (insp->pe == io7->pe) {
				printk(KERN_ERR "Too many IO7s at PE %d\n",
				       io7->pe);
				return NULL;
			}

			if (NULL == insp->next ||
			    insp->next->pe > io7->pe) { /* insert here */
				io7->next = insp->next;
				insp->next = io7;
				break;
			}
		}

		/* Defensive: should be unreachable given the loop above. */
		if (NULL == insp) { /* couldn't insert ?!? */
			printk(KERN_WARNING "Failed to insert IO7 at PE %d "
			       " - adding at head of list\n", io7->pe);
			io7->next = io7_head;
			io7_head = io7;
		}
	}

	return io7;
}
/*
 * Clear all latched error state on an IO7 by writing all-ones to the
 * per-port and common error summary CSRs (write-one-to-clear).
 */
void
io7_clear_errors(struct io7 *io7)
{
	io7_port7_csrs *p7csrs;
	io7_ioport_csrs *csrs;
	int port;


	/*
	 * First the IO ports.
	 */
	for (port = 0; port < 4; port++) {
		csrs = IO7_CSRS_KERN(io7->pe, port);

		csrs->POx_ERR_SUM.csr = -1UL;
		csrs->POx_TLB_ERR.csr = -1UL;
		csrs->POx_SPL_COMPLT.csr = -1UL;
		csrs->POx_TRANS_SUM.csr = -1UL;
	}

	/*
	 * Then the common ones.
	 */
	p7csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	p7csrs->PO7_ERROR_SUM.csr = -1UL;
	p7csrs->PO7_UNCRR_SYM.csr = -1UL;
	p7csrs->PO7_CRRCT_SYM.csr = -1UL;
}
/*
 * IO7 PCI, PCI/X, AGP configuration.
 */

/*
 * Bring up one PCI hose on IO7 'io7', port 'port': allocate the
 * pci_controller, claim its I/O and MEM resources, save the existing
 * DMA window CSRs, then program the PCI-to-memory translation windows.
 *
 * Window layout programmed below:
 *   Window 0: scatter-gather 8MB at 8MB
 *   Window 1: direct access 1GB at 2GB
 *   Window 2: scatter-gather (up to) 1GB at 3GB
 *   Window 3: disabled
 *
 * NOTE: the TBIA before and after the window writes is required so no
 * stale scatter-gather TLB entries survive the reprogramming.
 */
static void __init
io7_init_hose(struct io7 *io7, int port)
{
	static int hose_index = 0;

	struct pci_controller *hose = alloc_pci_controller();
	struct io7_port *io7_port = &io7->ports[port];
	io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port);
	int i;

	hose->index = hose_index++;	/* arbitrary */

	/*
	 * We don't have an isa or legacy hose, but glibc expects to be
	 * able to use the bus == 0 / dev == 0 form of the iobase syscall
	 * to determine information about the i/o system. Since XFree86
	 * relies on glibc's determination to tell whether or not to use
	 * sparse access, we need to point the pci_isa_hose at a real hose
	 * so at least that determination is correct.
	 */
	if (hose->index == 0)
		pci_isa_hose = hose;

	io7_port->csrs = csrs;
	io7_port->hose = hose;
	hose->sysdata = io7_port;

	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * Base addresses for userland consumption. Since these are going
	 * to be mapped, they are pure physical addresses.
	 */
	hose->sparse_mem_base = hose->sparse_io_base = 0;
	hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port);
	hose->dense_io_base = IO7_IO_PHYS(io7->pe, port);

	/*
	 * Base addresses and resource ranges for kernel consumption.
	 */
	hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port);

	hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port);
	hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1;
	hose->io_space->name = mk_resource_name(io7->pe, port, "IO");
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port);
	hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1;
	hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM");
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n",
		       hose->index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n",
		       hose->index);

	/*
	 * Save the existing DMA window settings for later restoration.
	 */
	for (i = 0; i < 4; i++) {
		io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr;
		io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr;
		io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather (up-to) 1GB at 3GB
	 * Window 3 is disabled
	 */

	/*
	 * TBIA before modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);

	/*
	 * Set up window 0 for scatter-gather 8MB at 8MB.
	 */
	hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[0].csr =
		hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);

	/*
	 * Set up window 1 for direct-mapped 1GB at 2GB.
	 */
	csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena;
	csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr;
	csrs->POx_TBASE[1].csr = 0;

	/*
	 * Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
	 */
	hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[2].csr =
		hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);

	/*
	 * Disable window 3.
	 */
	csrs->POx_WBASE[3].csr = 0;

	/*
	 * Make sure that the AGP Monster Window is disabled.
	 */
	csrs->POx_CTRL.csr &= ~(1UL << 61);

#if 1
	printk("FIXME: disabling master aborts\n");
	csrs->POx_MSK_HEI.csr &= ~(3UL << 14);
#endif
	/*
	 * TBIA after modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);
}
330 static void __init
331 marvel_init_io7(struct io7 *io7)
332 {
333 int i;
335 printk("Initializing IO7 at PID %d\n", io7->pe);
337 /*
338 * Get the Port 7 CSR pointer.
339 */
340 io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe);
342 /*
343 * Init this IO7's hoses.
344 */
345 for (i = 0; i < IO7_NUM_PORTS; i++) {
346 io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i);
347 if (csrs->POx_CACHE_CTL.csr == 8) {
348 io7->ports[i].enabled = 1;
349 io7_init_hose(io7, i);
350 }
351 }
352 }
354 void
355 marvel_io7_present(gct6_node *node)
356 {
357 int pe;
359 if (node->type != GCT_TYPE_HOSE ||
360 node->subtype != GCT_SUBTYPE_IO_PORT_MODULE)
361 return;
363 pe = (node->id >> 8) & 0xff;
364 printk("Found an IO7 at PID %d\n", pe);
366 alloc_io7(pe);
367 }
/*
 * Locate the hose carrying the graphics console (if any) and record it
 * in pci_vga_hose.  The hose number is decoded from the HWRPB console
 * terminal block: pid in bits <n:2>, port in bits <1:0>.
 */
static void __init
marvel_init_vga_hose(void)
{
#ifdef CONFIG_VGA_HOSE
	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

	if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
		struct pci_controller *hose = NULL;
		int h = (pu64[30] >> 24) & 0xff; /* TERM_OUT_LOC, hose # */
		struct io7 *io7;
		int pid, port;

		/* FIXME - encoding is going to have to change for Marvel
		 *         since hose will be able to overflow a byte...
		 *         need to fix this decode when the console
		 *         changes its encoding
		 */
		printk("console graphics is on hose %d (console)\n", h);

		/*
		 * The console's hose numbering is:
		 *
		 *	hose<n:2>: PID
		 *	hose<1:0>: PORT
		 *
		 * We need to find the hose at that pid and port
		 */
		pid = h >> 2;
		port = h & 3;
		if ((io7 = marvel_find_io7(pid)))
			hose = io7->ports[port].hose;

		if (hose) {
			printk("Console graphics on hose %d\n", hose->index);
			pci_vga_hose = hose;
		}
	}
#endif /* CONFIG_VGA_HOSE */
}
/*
 * GCT node types we want to be called back for during the config-tree
 * walk in marvel_init_arch(); terminated by an all-zero entry.
 */
gct6_search_struct gct_wanted_node_list[] = {
	{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
	{ 0, 0, NULL }
};
414 /*
415 * In case the GCT is not complete, let the user specify PIDs with IO7s
416 * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal)
417 * where IO7s are connected
418 */
419 static int __init
420 marvel_specify_io7(char *str)
421 {
422 unsigned long pid;
423 struct io7 *io7;
424 char *pchar;
426 do {
427 pid = simple_strtoul(str, &pchar, 0);
428 if (pchar != str) {
429 printk("User-specified IO7 at PID %lu\n", pid);
430 io7 = alloc_io7(pid);
431 if (io7) marvel_init_io7(io7);
432 }
434 if (pchar == str) pchar++;
435 str = pchar;
436 } while(*str);
438 return 1;
439 }
440 __setup("io7=", marvel_specify_io7);
/*
 * Platform init: set up the direct-map DMA window parameters, discover
 * IO7s from the GCT (plus any user-specified ones already allocated),
 * initialize each one, then locate the VGA console hose.
 */
void __init
marvel_init_arch(void)
{
	struct io7 *io7;

	/* With multiple PCI busses, we play with I/O as physical addrs. */
	ioport_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB. */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Parse the config tree. */
	gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list);

	/* Init the io7s. */
	for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); )
		marvel_init_io7(io7);

	/* Check for graphic console location (if any). */
	marvel_init_vga_hose();
}
/*
 * Platform shutdown hook — nothing to do on Marvel.
 * 'mode' is the halt/restart mode passed by the generic code.
 */
void
marvel_kill_arch(int mode)
{
}
471 /*
472 * PCI Configuration Space access functions
473 *
474 * Configuration space addresses have the following format:
475 *
476 * |2 2 2 2|1 1 1 1|1 1 1 1|1 1
477 * |3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
478 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
479 * |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R|
480 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
481 *
482 * n:24 reserved for hose base
 *  23:16	bus number (8 bits = 256 possible buses)
484 * 15:11 Device number (5 bits)
485 * 10:8 function number
486 * 7:2 register number
487 *
488 * Notes:
489 * IO7 determines whether to use a type 0 or type 1 config cycle
490 * based on the bus number. Therefore the bus number must be set
491 * to 0 for the root bus on any hose.
492 *
493 * The function number selects which function of a multi-function device
494 * (e.g., SCSI and Ethernet).
495 *
496 */
498 static inline unsigned long
499 build_conf_addr(struct pci_controller *hose, u8 bus,
500 unsigned int devfn, int where)
501 {
502 return (hose->config_space_base | (bus << 16) | (devfn << 8) | where);
503 }
505 static unsigned long
506 mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where)
507 {
508 struct pci_controller *hose = pbus->sysdata;
509 struct io7_port *io7_port;
510 unsigned long addr = 0;
511 u8 bus = pbus->number;
513 if (!hose)
514 return addr;
516 /* Check for enabled. */
517 io7_port = hose->sysdata;
518 if (!io7_port->enabled)
519 return addr;
521 if (!pbus->parent) { /* No parent means peer PCI bus. */
522 /* Don't support idsel > 20 on primary bus. */
523 if (devfn >= PCI_DEVFN(21, 0))
524 return addr;
525 bus = 0;
526 }
528 addr = build_conf_addr(hose, bus, devfn, where);
530 DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
531 return addr;
532 }
534 static int
535 marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where,
536 int size, u32 *value)
537 {
538 unsigned long addr;
540 if (0 == (addr = mk_conf_addr(bus, devfn, where)))
541 return PCIBIOS_DEVICE_NOT_FOUND;
543 switch(size) {
544 case 1:
545 *value = __kernel_ldbu(*(vucp)addr);
546 break;
547 case 2:
548 *value = __kernel_ldwu(*(vusp)addr);
549 break;
550 case 4:
551 *value = *(vuip)addr;
552 break;
553 default:
554 return PCIBIOS_FUNC_NOT_SUPPORTED;
555 }
557 return PCIBIOS_SUCCESSFUL;
558 }
/*
 * pci_ops write hook: write 'size' bytes of config space.
 * Each store is followed by mb() and a read-back of the same location
 * to force the posted write out to the device before returning.
 */
static int
marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);	/* read back to flush */
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);	/* read back to flush */
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;			/* read back to flush */
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops marvel_pci_ops =
{
	.read =		marvel_read_config,
	.write =	marvel_write_config,
};
/*
 * Other PCI helper functions.
 */

/*
 * Invalidate the scatter-gather TLB on the port behind 'hose'.
 * IO7 only supports invalidate-all, so 'start'/'end' are ignored.
 * wmb() orders prior pte updates before the TBIA write; the trailing
 * read flushes the posted write.
 */
void
marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;

	wmb();
	csrs->POx_SG_TBIA.csr = 0;
	mb();
	csrs->POx_SG_TBIA.csr;
}
/*
 * RTC Support
 */

/* Argument block for __marvel_access_rtc (PALcode cserve RTC call). */
struct marvel_rtc_access_info {
	unsigned long function;		/* cserve function code */
	unsigned long index;		/* RTC register index */
	unsigned long data;		/* value in; result out */
};
/*
 * Perform an RTC access via the PAL_cserve call.  Arguments are
 * marshalled into $16-$18 per the PALcode calling convention; the
 * result comes back in $0 and is stored into rtc_access->data.
 * 'info' is void* so this can be used as an smp_call_function target.
 */
static void
__marvel_access_rtc(void *info)
{
	struct marvel_rtc_access_info *rtc_access = info;

	register unsigned long __r0 __asm__("$0");
	register unsigned long __r16 __asm__("$16") = rtc_access->function;
	register unsigned long __r17 __asm__("$17") = rtc_access->index;
	register unsigned long __r18 __asm__("$18") = rtc_access->data;

	__asm__ __volatile__(
		"call_pal %4 # cserve rtc"
		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
		: "$1", "$22", "$23", "$24", "$25");

	rtc_access->data = __r0;
}
/*
 * Emulate the legacy RTC I/O ports (0x70 index / 0x71 data) on top of
 * the PALcode GET/PUT_TOY service.  A write to 0x70 latches the
 * register index (kept in a static — single RTC, not reentrant); port
 * 0x71 performs the actual access, which must run on the boot CPU.
 * Data crosses the PAL interface in binary, so it is converted from/to
 * BCD at this boundary.
 */
static u8
__marvel_rtc_io(u8 b, unsigned long addr, int write)
{
	static u8 index = 0;

	struct marvel_rtc_access_info rtc_access;
	u8 ret = 0;

	switch(addr) {
	case 0x70:					/* RTC_PORT(0) */
		if (write) index = b;
		ret = index;
		break;

	case 0x71:					/* RTC_PORT(1) */
		rtc_access.index = index;
		rtc_access.data = BCD_TO_BIN(b);
		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */

#ifdef CONFIG_SMP
		/* The TOY PAL call is only valid on the boot CPU. */
		if (smp_processor_id() != boot_cpuid)
			smp_call_function_on_cpu(__marvel_access_rtc,
						 &rtc_access, 1, 1,
						 cpumask_of_cpu(boot_cpuid));
		else
			__marvel_access_rtc(&rtc_access);
#else
		__marvel_access_rtc(&rtc_access);
#endif
		ret = BIN_TO_BCD(rtc_access.data);
		break;

	default:
		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
		break;
	}

	return ret;
}
/*
 * IO map support.
 */

/* True for addresses inside the legacy VGA frame buffer window. */
#define __marvel_is_mem_vga(a)	(((a) >= 0xa0000) && ((a) <= 0xc0000))
/*
 * Map a bus memory address for CPU access.
 *
 * Legacy VGA addresses are first redirected onto the VGA hose.  The
 * owning hose is found by comparing the upper 32 address bits against
 * each hose's mem_space start.  Addresses inside the direct-map window
 * get an identity-space pointer with no page tables; addresses inside
 * the hose's scatter-gather arena are mapped page by page through the
 * arena's ptes into a fresh vmalloc area.  Anything else fails.
 */
void __iomem *
marvel_ioremap(unsigned long addr, unsigned long size)
{
	struct pci_controller *hose;
	unsigned long baddr, last;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the addr.
	 */
#ifdef CONFIG_VGA_HOSE
	if (pci_vga_hose && __marvel_is_mem_vga(addr)) {
		addr += pci_vga_hose->mem_space->start;
	}
#endif

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next) {
		if ((addr >> 32) == (hose->mem_space->start >> 32))
			break;
	}
	if (!hose)
		return NULL;

	/*
	 * We have the hose - calculate the bus limits.
	 */
	baddr = addr - hose->mem_space->start;
	last = baddr + size - 1;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		addr = IDENT_ADDR | (baddr - __direct_map_base);
		return (void __iomem *) addr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		     baddr <= last;
		     baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			/* Bit 0 of an arena pte is its valid bit. */
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		/* Preserve the sub-page offset of the original address. */
		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);

		return (void __iomem *) vaddr;
	}

	return NULL;
}
787 void
788 marvel_iounmap(volatile void __iomem *xaddr)
789 {
790 unsigned long addr = (unsigned long) xaddr;
791 if (addr >= VMALLOC_START)
792 vfree((void *)(PAGE_MASK & addr));
793 }
795 int
796 marvel_is_mmio(const volatile void __iomem *xaddr)
797 {
798 unsigned long addr = (unsigned long) xaddr;
800 if (addr >= VMALLOC_START)
801 return 1;
802 else
803 return (addr & 0xFF000000UL) == 0;
804 }
/* Legacy VGA I/O ports, excluding the 0x3b3/0x3d3 holes. */
#define __marvel_is_port_vga(a)	\
  (((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3))
/* Legacy keyboard controller ports. */
#define __marvel_is_port_kbd(a)	(((a) == 0x60) || ((a) == 0x64))
/* Legacy RTC ports (emulated via __marvel_rtc_io). */
#define __marvel_is_port_rtc(a)	(((a) == 0x70) || ((a) == 0x71))
811 void __iomem *marvel_ioportmap (unsigned long addr)
812 {
813 if (__marvel_is_port_rtc (addr) || __marvel_is_port_kbd(addr))
814 ;
815 #ifdef CONFIG_VGA_HOSE
816 else if (__marvel_is_port_vga (addr) && pci_vga_hose)
817 addr += pci_vga_hose->io_space->start;
818 #endif
819 else
820 return NULL;
821 return (void __iomem *)addr;
822 }
824 unsigned int
825 marvel_ioread8(void __iomem *xaddr)
826 {
827 unsigned long addr = (unsigned long) xaddr;
828 if (__marvel_is_port_kbd(addr))
829 return 0;
830 else if (__marvel_is_port_rtc(addr))
831 return __marvel_rtc_io(0, addr, 0);
832 else
833 return __kernel_ldbu(*(vucp)addr);
834 }
836 void
837 marvel_iowrite8(u8 b, void __iomem *xaddr)
838 {
839 unsigned long addr = (unsigned long) xaddr;
840 if (__marvel_is_port_kbd(addr))
841 return;
842 else if (__marvel_is_port_rtc(addr))
843 __marvel_rtc_io(b, addr, 1);
844 else
845 __kernel_stb(b, *(vucp)addr);
846 }
/* Export the I/O accessors for modules, except on generic kernels
 * where the machine-vector indirection is used instead. */
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(marvel_ioremap);
EXPORT_SYMBOL(marvel_iounmap);
EXPORT_SYMBOL(marvel_is_mmio);
EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif
/*
 * NUMA Support
 */
/**********
 * FIXME - for now each cpu is a node by itself
 *	      - no real support for striped mode
 **********
 */

/*
 * Decode the owning cpuid from a physical address — bit 43 selects the
 * I/O encoding vs the memory encoding — and map it to a node id.
 */
int
marvel_pa_to_nid(unsigned long pa)
{
	int cpuid;
	int is_io = (pa >> 43) & 1;

	if (is_io)
		cpuid = (~(pa >> 35) & 0xff);
	else
		cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));

	return marvel_cpuid_to_nid(cpuid);
}
/* 1:1 cpu-to-node mapping (see FIXME above: no striped-mode support). */
int
marvel_cpuid_to_nid(int cpuid)
{
	return cpuid;
}
/*
 * Physical base address of node 'nid's memory: re-interleave the node
 * id bits (low 2 bits, then the next 5 bits shifted up by one) and
 * scale by the per-node 16GB region size (<< 34).
 */
unsigned long
marvel_node_mem_start(int nid)
{
	unsigned long low = nid & 0x3;
	unsigned long high = (nid & (0x1f << 2)) << 1;

	return (low | high) << 34;
}
/*
 * Every node owns a fixed 16GB (2^34 byte) physical region; 'nid' is
 * unused but kept for the machine-vector interface.
 */
unsigned long
marvel_node_mem_size(int nid)
{
	return 1UL << 34;	/* 16GB */
}
902 /*
903 * AGP GART Support.
904 */
905 #include <linux/agp_backend.h>
906 #include <asm/agp_backend.h>
907 #include <linux/slab.h>
908 #include <linux/delay.h>
/* Per-AGP-bridge aperture state: the backing SG arena and the page
 * range reserved inside it by marvel_agp_setup(). */
struct marvel_agp_aperture {
	struct pci_iommu_arena *arena;	/* hose's sg_pci arena */
	long pg_start;			/* first reserved page index */
	long pg_count;			/* number of reserved pages */
};
916 static int
917 marvel_agp_setup(alpha_agp_info *agp)
918 {
919 struct marvel_agp_aperture *aper;
921 if (!alpha_agpgart_size)
922 return -ENOMEM;
924 aper = kmalloc(sizeof(*aper), GFP_KERNEL);
925 if (aper == NULL) return -ENOMEM;
927 aper->arena = agp->hose->sg_pci;
928 aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
929 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
930 aper->pg_count - 1);
932 if (aper->pg_start < 0) {
933 printk(KERN_ERR "Failed to reserve AGP memory\n");
934 kfree(aper);
935 return -ENOMEM;
936 }
938 agp->aperture.bus_base =
939 aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
940 agp->aperture.size = aper->pg_count * PAGE_SIZE;
941 agp->aperture.sysdata = aper;
943 return 0;
944 }
/*
 * alpha_agp_ops.cleanup: release the reserved aperture pages.  If the
 * first release fails with -EBUSY the pages are still bound, so unbind
 * them and retry before freeing the bookkeeping structures.
 */
static void
marvel_agp_cleanup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}
967 static int
968 marvel_agp_configure(alpha_agp_info *agp)
969 {
970 io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
971 struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
972 unsigned int new_rate = 0;
973 unsigned long agp_pll;
975 /*
976 * Check the requested mode against the PLL setting.
977 * The agpgart_be code has not programmed the card yet,
978 * so we can still tweak mode here.
979 */
980 agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
981 switch(IO7_PLL_RNGB(agp_pll)) {
982 case 0x4: /* 2x only */
983 /*
984 * The PLL is only programmed for 2x, so adjust the
985 * rate to 2x, if necessary.
986 */
987 if (agp->mode.bits.rate != 2)
988 new_rate = 2;
989 break;
991 case 0x6: /* 1x / 4x */
992 /*
993 * The PLL is programmed for 1x or 4x. Don't go faster
994 * than requested, so if the requested rate is 2x, use 1x.
995 */
996 if (agp->mode.bits.rate == 2)
997 new_rate = 1;
998 break;
1000 default: /* ??????? */
1001 /*
1002 * Don't know what this PLL setting is, take the requested
1003 * rate, but warn the user.
1004 */
1005 printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
1006 __FUNCTION__, IO7_PLL_RNGB(agp_pll), agp_pll);
1007 break;
1010 /*
1011 * Set the new rate, if necessary.
1012 */
1013 if (new_rate) {
1014 printk("Requested AGP Rate %dX not compatible "
1015 "with PLL setting - using %dX\n",
1016 agp->mode.bits.rate,
1017 new_rate);
1019 agp->mode.bits.rate = new_rate;
1022 printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
1023 agp->hose->index, agp->mode.bits.rate,
1024 agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);
1026 csrs->AGP_CMD.csr = agp->mode.lw;
1028 return 0;
1031 static int
1032 marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
1034 struct marvel_agp_aperture *aper = agp->aperture.sysdata;
1035 return iommu_bind(aper->arena, aper->pg_start + pg_start,
1036 mem->page_count, mem->memory);
1039 static int
1040 marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
1042 struct marvel_agp_aperture *aper = agp->aperture.sysdata;
1043 return iommu_unbind(aper->arena, aper->pg_start + pg_start,
1044 mem->page_count);
1047 static unsigned long
1048 marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
1050 struct marvel_agp_aperture *aper = agp->aperture.sysdata;
1051 unsigned long baddr = addr - aper->arena->dma_base;
1052 unsigned long pte;
1054 if (addr < agp->aperture.bus_base ||
1055 addr >= agp->aperture.bus_base + agp->aperture.size) {
1056 printk("%s: addr out of range\n", __FUNCTION__);
1057 return -EINVAL;
1060 pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
1061 if (!(pte & 1)) {
1062 printk("%s: pte not valid\n", __FUNCTION__);
1063 return -EINVAL;
1065 return (pte >> 1) << PAGE_SHIFT;
1068 struct alpha_agp_ops marvel_agp_ops =
1070 .setup = marvel_agp_setup,
1071 .cleanup = marvel_agp_cleanup,
1072 .configure = marvel_agp_configure,
1073 .bind = marvel_agp_bind_memory,
1074 .unbind = marvel_agp_unbind_memory,
1075 .translate = marvel_agp_translate
1076 };
1078 alpha_agp_info *
1079 marvel_agp_info(void)
1081 struct pci_controller *hose;
1082 io7_ioport_csrs *csrs;
1083 alpha_agp_info *agp;
1084 struct io7 *io7;
1086 /*
1087 * Find the first IO7 with an AGP card.
1089 * FIXME -- there should be a better way (we want to be able to
1090 * specify and what if the agp card is not video???)
1091 */
1092 hose = NULL;
1093 for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) {
1094 struct pci_controller *h;
1095 vuip addr;
1097 if (!io7->ports[IO7_AGP_PORT].enabled)
1098 continue;
1100 h = io7->ports[IO7_AGP_PORT].hose;
1101 addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0);
1103 if (*addr != 0xffffffffu) {
1104 hose = h;
1105 break;
1109 if (!hose || !hose->sg_pci)
1110 return NULL;
1112 printk("MARVEL - using hose %d as AGP\n", hose->index);
1114 /*
1115 * Get the csrs from the hose.
1116 */
1117 csrs = ((struct io7_port *)hose->sysdata)->csrs;
1119 /*
1120 * Allocate the info structure.
1121 */
1122 agp = kmalloc(sizeof(*agp), GFP_KERNEL);
1124 /*
1125 * Fill it in.
1126 */
1127 agp->hose = hose;
1128 agp->private = NULL;
1129 agp->ops = &marvel_agp_ops;
1131 /*
1132 * Aperture - not configured until ops.setup().
1133 */
1134 agp->aperture.bus_base = 0;
1135 agp->aperture.size = 0;
1136 agp->aperture.sysdata = NULL;
1138 /*
1139 * Capabilities.
1141 * NOTE: IO7 reports through AGP_STAT that it can support a read queue
1142 * depth of 17 (rq = 0x10). It actually only supports a depth of
1143 * 16 (rq = 0xf).
1144 */
1145 agp->capability.lw = csrs->AGP_STAT.csr;
1146 agp->capability.bits.rq = 0xf;
1148 /*
1149 * Mode.
1150 */
1151 agp->mode.lw = csrs->AGP_CMD.csr;
1153 return agp;