ia64/linux-2.6.18-xen.hg

view arch/alpha/kernel/core_titan.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /*
2 * linux/arch/alpha/kernel/core_titan.c
3 *
4 * Code common to all TITAN core logic chips.
5 */
7 #define __EXTERN_INLINE inline
8 #include <asm/io.h>
9 #include <asm/core_titan.h>
10 #undef __EXTERN_INLINE
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/pci.h>
15 #include <linux/sched.h>
16 #include <linux/init.h>
17 #include <linux/vmalloc.h>
18 #include <linux/bootmem.h>
20 #include <asm/ptrace.h>
21 #include <asm/smp.h>
22 #include <asm/pgalloc.h>
23 #include <asm/tlbflush.h>
25 #include "proto.h"
26 #include "pci_impl.h"
/* Save Titan configuration data as the console had it set up.
   One slot per hose/port (up to 4); captured in
   titan_init_one_pachip_port() and written back by
   titan_kill_one_pachip_port() so SRM finds its own PCI window
   translations intact on reboot.  The "common" attribute gives the
   definition tentative/common linkage. */

struct
{
	unsigned long wsba[4];	/* window space base address regs */
	unsigned long wsm[4];	/* window space mask regs */
	unsigned long tba[4];	/* translated base address regs */
} saved_config[4] __attribute__((common));
/*
 * BIOS32-style PCI interface:
 */

/* Set to 1 to trace config-space address formation via printk. */
#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif
50 /*
51 * Routines to access TIG registers.
52 */
53 static inline volatile unsigned long *
54 mk_tig_addr(int offset)
55 {
56 return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
57 }
59 static inline u8
60 titan_read_tig(int offset, u8 value)
61 {
62 volatile unsigned long *tig_addr = mk_tig_addr(offset);
63 return (u8)(*tig_addr & 0xff);
64 }
66 static inline void
67 titan_write_tig(int offset, u8 value)
68 {
69 volatile unsigned long *tig_addr = mk_tig_addr(offset);
70 *tig_addr = (unsigned long)value;
71 }
/*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address.
 *
 * NOTE(review): the original comment's rationale appears truncated
 * here; presumably config-space access involves shared hardware state,
 * hence: it is not safe to have concurrent invocations of the
 * configuration space access routines, but there really shouldn't be
 * any need for this.
 *
 * Note that all config space accesses use Type 1 address format.
 *
 * Note also that type 1 is determined by non-zero bus number.
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	struct pci_controller *hose = pbus->sysdata;
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	if (!pbus->parent) /* No parent means peer PCI bus. */
		bus = 0;
	*type1 = (bus != 0);

	/* Pack bus/devfn/register, then bias by this hose's config space. */
	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= hose->config_space_base;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}
132 static int
133 titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
134 int size, u32 *value)
135 {
136 unsigned long addr;
137 unsigned char type1;
139 if (mk_conf_addr(bus, devfn, where, &addr, &type1))
140 return PCIBIOS_DEVICE_NOT_FOUND;
142 switch (size) {
143 case 1:
144 *value = __kernel_ldbu(*(vucp)addr);
145 break;
146 case 2:
147 *value = __kernel_ldwu(*(vusp)addr);
148 break;
149 case 4:
150 *value = *(vuip)addr;
151 break;
152 }
154 return PCIBIOS_SUCCESSFUL;
155 }
/* pci_ops.write hook: issue a 1/2/4-byte config-space write at 'where'. */
static int
titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* For each width: store, memory barrier, then read back the same
	   location so the write is pushed out to the device before we
	   return.  The ordering of these three steps must not change. */
	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;	/* read-back flush */
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops titan_pci_ops =
{
	.read =		titan_read_config,
	.write =	titan_write_config,
};
/*
 * Invalidate the scatter-gather TLB of the pachip port backing 'hose'
 * for the PCI address range [start, end].
 */
void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	titan_pachip *pachip =
	  (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
	titan_pachip_port *port;
	volatile unsigned long *csr;
	unsigned long value;

	/* Get the right hose.  Odd hose index -> pachip1 (above),
	   bit 1 set -> a_port rather than g_port. */
	port = &pachip->g_port;
	if (hose->index & 2)
		port = &pachip->a_port;

	/* We can invalidate up to 8 tlb entries in a go.  The flush
	   matches against <31:16> in the pci address.
	   Note that gtlbi* and atlbi* are in the same place in the g_port
	   and a_port, respectively, so the g_port offset can be used
	   even if hose is an a_port */
	csr = &port->port_specific.g.gtlbia.csr;	/* invalidate-all */
	if (((start ^ end) & 0xffff0000) == 0)
		/* whole range shares one tag -> single-entry invalidate */
		csr = &port->port_specific.g.gtlbiv.csr;

	/* For TBIA, it doesn't matter what value we write.  For TBI,
	   it's the shifted tag bits. */
	value = (start & 0xffff0000) >> 12;

	wmb();			/* order prior PTE updates before the flush */
	*csr = value;
	mb();
	*csr;			/* read back: ensure the invalidate completed */
}
228 static int
229 titan_query_agp(titan_pachip_port *port)
230 {
231 union TPAchipPCTL pctl;
233 /* set up APCTL */
234 pctl.pctl_q_whole = port->pctl.csr;
236 return pctl.pctl_r_bits.apctl_v_agp_present;
238 }
/*
 * Bring up one pachip port as PCI hose 'index': allocate the hose and
 * its resources, save the console's window translations for reboot,
 * then program our own DMA windows and flush the SG TLB.
 */
static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
	struct pci_controller *hose;

	hose = alloc_pci_controller();
	if (index == 0)
		pci_isa_hose = hose;	/* hose 0 carries the ISA bridge */
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * This is for userland consumption.  The 40-bit PIO bias that we
	 * use in the kernel through KSEG doesn't work in the page table
	 * based user mappings. (43-bit KSEG sign extends the physical
	 * address from bit 40 to hit the I/O bit - mapped addresses don't).
	 * So make sure we get the 43-bit PIO bias.
	 */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
	hose->dense_io_base
	  = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;

	hose->config_space_base = TITAN_CONF(index);
	hose->index = index;

	hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
	hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[index];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[index];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n", index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n", index);

	/*
	 * Save the existing PCI window translations.  SRM will
	 * need them when we go to reboot.
	 */
	saved_config[index].wsba[0] = port->wsba[0].csr;
	saved_config[index].wsm[0] = port->wsm[0].csr;
	saved_config[index].tba[0] = port->tba[0].csr;

	saved_config[index].wsba[1] = port->wsba[1].csr;
	saved_config[index].wsm[1] = port->wsm[1].csr;
	saved_config[index].tba[1] = port->tba[1].csr;

	saved_config[index].wsba[2] = port->wsba[2].csr;
	saved_config[index].wsm[2] = port->wsm[2].csr;
	saved_config[index].tba[2] = port->tba[2].csr;

	saved_config[index].wsba[3] = port->wsba[3].csr;
	saved_config[index].wsm[3] = port->wsm[3].csr;
	saved_config[index].tba[3] = port->tba[3].csr;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 on Titan is Scatter-Gather ONLY.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather 1GB at 3GB
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8; /* 64KB for ISA */

	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */

	/* WSBA low bits: presumably bit 0 = window enable, bit 1 =
	   scatter-gather ('| 3' for SG windows, '| 1' for the direct
	   window) -- TODO confirm against Titan chipset docs. */
	port->wsba[0].csr = hose->sg_isa->dma_base | 3;
	port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
	port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);

	port->wsba[1].csr = __direct_map_base | 1;
	port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000;
	port->tba[1].csr = 0;

	port->wsba[2].csr = hose->sg_pci->dma_base | 3;
	port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000;
	port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);

	port->wsba[3].csr = 0;	/* window 3 disabled */

	/* Enable the Monster Window to make DAC pci64 possible. */
	port->pctl.csr |= pctl_m_mwin;

	/*
	 * If it's an AGP port, initialize agplastwr.
	 */
	if (titan_query_agp(port))
		port->port_specific.a.agplastwr.csr = __direct_map_base;

	/* Flush the whole SG TLB now that the windows are programmed. */
	titan_pci_tbi(hose, 0, -1);
}
/* Initialize every present pachip port; the call order fixes the hose
   numbering (g_ports are hoses 0/1, a_ports are hoses 2/3). */
static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	/* CSC bit 14: second pachip present -- TODO confirm against the
	   Titan chipset documentation. */
	int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

	/* Init the ports in hose order... */
	titan_init_one_pachip_port(&pachip0->g_port, 0);	/* hose 0 */
	if (pchip1_present)
		titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
	titan_init_one_pachip_port(&pachip0->a_port, 2);	/* hose 2 */
	if (pchip1_present)
		titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}
358 static void __init
359 titan_init_vga_hose(void)
360 {
361 #ifdef CONFIG_VGA_HOSE
362 u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
364 if (pu64[7] == 3) { /* TERM_TYPE == graphics */
365 struct pci_controller *hose;
366 int h = (pu64[30] >> 24) & 0xff; /* console hose # */
368 /*
369 * Our hose numbering matches the console's, so just find
370 * the right one...
371 */
372 for (hose = hose_head; hose; hose = hose->next) {
373 if (hose->index == h) break;
374 }
376 if (hose) {
377 printk("Console graphics on hose %d\n", hose->index);
378 pci_vga_hose = hose;
379 }
380 }
381 #endif /* CONFIG_VGA_HOSE */
382 }
/* Titan platform bring-up: record boot CPU, widen the ioport resource,
   fix the direct DMA map, and initialize the pachips and VGA hose. */
void __init
titan_init_arch(void)
{
#if 0
	/* Compiled-out debug dump of the C-chip and D-chip CSRs. */
	printk("%s: titan_init_arch()\n", __FUNCTION__);
	printk("%s: CChip registers:\n", __FUNCTION__);
	printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
	printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
	printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
	printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
	printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
	printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
	printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
	printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);

	printk("%s: DChip registers:\n", __FUNCTION__);
	printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
	printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
	printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);
#endif

	boot_cpuid = __hard_smp_processor_id();

	/* With multiple PCI busses, we play with I/O as physical addrs. */
	ioport_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB. */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Init the PA chip(s). */
	titan_init_pachips(TITAN_pachip0, TITAN_pachip1);

	/* Check for graphic console location (if any). */
	titan_init_vga_hose();
}
421 static void
422 titan_kill_one_pachip_port(titan_pachip_port *port, int index)
423 {
424 port->wsba[0].csr = saved_config[index].wsba[0];
425 port->wsm[0].csr = saved_config[index].wsm[0];
426 port->tba[0].csr = saved_config[index].tba[0];
428 port->wsba[1].csr = saved_config[index].wsba[1];
429 port->wsm[1].csr = saved_config[index].wsm[1];
430 port->tba[1].csr = saved_config[index].tba[1];
432 port->wsba[2].csr = saved_config[index].wsba[2];
433 port->wsm[2].csr = saved_config[index].wsm[2];
434 port->tba[2].csr = saved_config[index].tba[2];
436 port->wsba[3].csr = saved_config[index].wsba[3];
437 port->wsm[3].csr = saved_config[index].wsm[3];
438 port->tba[3].csr = saved_config[index].tba[3];
439 }
/* Restore SRM window state on every present pachip port (mirror of
   titan_init_pachips(), using the same presence test). */
static void
titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	/* CSC bit 14: second pachip present. */
	int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

	if (pchip1_present) {
		titan_kill_one_pachip_port(&pachip1->g_port, 1);
		titan_kill_one_pachip_port(&pachip1->a_port, 3);
	}
	titan_kill_one_pachip_port(&pachip0->g_port, 0);
	titan_kill_one_pachip_port(&pachip0->a_port, 2);
}
/* Platform shutdown hook.  'mode' is ignored here: restoring the SRM
   PCI window configuration is all Titan needs regardless of how we
   are going down. */
void
titan_kill_arch(int mode)
{
	titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}
/*
 * IO map support.
 */

/*
 * Map a bus address range for MMIO.  Three outcomes:
 *  - direct-mapped range: return a KSEG-biased cookie, no page tables;
 *  - range inside the hose's PCI scatter-gather arena: build a real
 *    vmalloc-space mapping from the arena's PTEs;
 *  - anything else: NULL.
 */
void __iomem *
titan_ioremap(unsigned long addr, unsigned long size)
{
	/* The hose number is encoded in the high bits of the address. */
	int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
	unsigned long baddr = addr & ~TITAN_HOSE_MASK;
	unsigned long last = baddr + size - 1;
	struct pci_controller *hose;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the addr: legacy VGA accesses are redirected to the
	 * console's hose.
	 */
#ifdef CONFIG_VGA_HOSE
	if (pci_vga_hose && __titan_is_mem_vga(addr)) {
		h = pci_vga_hose->index;
		addr += pci_vga_hose->mem_space->start;
	}
#endif

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == h)
			break;
	if (!hose)
		return NULL;

	/*
	 * Is it direct-mapped?  If so no mapping is needed; just bias
	 * the address into KSEG.
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
		return (void __iomem *) vaddr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){

		/*
		 * Adjust the limits (mappings must be page aligned);
		 * baddr/last become offsets into the arena.
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it: walk the arena PTEs and remap each backing
		 * page into a fresh vmalloc-space area.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		    baddr <= last;
		    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {	/* low bit = PTE valid */
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		/* Re-apply the sub-page offset of the original address. */
		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
		return (void __iomem *) vaddr;
	}

	return NULL;
}
557 void
558 titan_iounmap(volatile void __iomem *xaddr)
559 {
560 unsigned long addr = (unsigned long) xaddr;
561 if (addr >= VMALLOC_START)
562 vfree((void *)(PAGE_MASK & addr));
563 }
565 int
566 titan_is_mmio(const volatile void __iomem *xaddr)
567 {
568 unsigned long addr = (unsigned long) xaddr;
570 if (addr >= VMALLOC_START)
571 return 1;
572 else
573 return (addr & 0x100000000UL) == 0;
574 }
#ifndef CONFIG_ALPHA_GENERIC
/* Export the io-map entry points when built Titan-only; presumably the
   generic build reaches them some other way (machine vectors?) --
   TODO confirm. */
EXPORT_SYMBOL(titan_ioremap);
EXPORT_SYMBOL(titan_iounmap);
EXPORT_SYMBOL(titan_is_mmio);
#endif
/*
 * AGP GART Support.
 */
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

/* A slice of the hose's PCI scatter-gather arena reserved as the AGP
   aperture: 'pg_count' arena pages starting at arena page 'pg_start'. */
struct titan_agp_aperture {
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;
};
596 static int
597 titan_agp_setup(alpha_agp_info *agp)
598 {
599 struct titan_agp_aperture *aper;
601 if (!alpha_agpgart_size)
602 return -ENOMEM;
604 aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
605 if (aper == NULL)
606 return -ENOMEM;
608 aper->arena = agp->hose->sg_pci;
609 aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
610 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
611 aper->pg_count - 1);
612 if (aper->pg_start < 0) {
613 printk(KERN_ERR "Failed to reserve AGP memory\n");
614 kfree(aper);
615 return -ENOMEM;
616 }
618 agp->aperture.bus_base =
619 aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
620 agp->aperture.size = aper->pg_count * PAGE_SIZE;
621 agp->aperture.sysdata = aper;
623 return 0;
624 }
/* alpha_agp_ops.cleanup: give the aperture pages back to the arena and
   free the descriptors. */
static void
titan_agp_cleanup(alpha_agp_info *agp)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		/* Pages are still bound: force-unbind and retry once. */
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}
/* alpha_agp_ops.configure: program the port's APCTL register from the
   negotiated agp->mode (SBA, rate, enable) and fixed RQ depths. */
static int
titan_agp_configure(alpha_agp_info *agp)
{
	union TPAchipPCTL pctl;
	titan_pachip_port *port = agp->private;
	pctl.pctl_q_whole = port->pctl.csr;

	/* Side-Band Addressing? */
	pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

	/* AGP Rate? */
	pctl.pctl_r_bits.apctl_v_agp_rate = 0;		/* 1x */
	if (agp->mode.bits.rate & 2)
		pctl.pctl_r_bits.apctl_v_agp_rate = 1;	/* 2x */
#if 0
	/* 4x deliberately compiled out -- reason not recorded here;
	   confirm hardware support before enabling. */
	if (agp->mode.bits.rate & 4)
		pctl.pctl_r_bits.apctl_v_agp_rate = 2;	/* 4x */
#endif

	/* RQ Depth? */
	pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
	pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

	/*
	 * AGP Enable.
	 */
	pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

	/* Tell the user. */
	printk("Enabling AGP: %dX%s\n",
	       1 << pctl.pctl_r_bits.apctl_v_agp_rate,
	       pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

	/* Write it. */
	port->pctl.csr = pctl.pctl_q_whole;

	/* And wait at least 5000 66MHz cycles (per Titan spec). */
	udelay(100);

	return 0;
}
689 static int
690 titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
691 {
692 struct titan_agp_aperture *aper = agp->aperture.sysdata;
693 return iommu_bind(aper->arena, aper->pg_start + pg_start,
694 mem->page_count, mem->memory);
695 }
697 static int
698 titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
699 {
700 struct titan_agp_aperture *aper = agp->aperture.sysdata;
701 return iommu_unbind(aper->arena, aper->pg_start + pg_start,
702 mem->page_count);
703 }
705 static unsigned long
706 titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
707 {
708 struct titan_agp_aperture *aper = agp->aperture.sysdata;
709 unsigned long baddr = addr - aper->arena->dma_base;
710 unsigned long pte;
712 if (addr < agp->aperture.bus_base ||
713 addr >= agp->aperture.bus_base + agp->aperture.size) {
714 printk("%s: addr out of range\n", __FUNCTION__);
715 return -EINVAL;
716 }
718 pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
719 if (!(pte & 1)) {
720 printk("%s: pte not valid\n", __FUNCTION__);
721 return -EINVAL;
722 }
724 return (pte >> 1) << PAGE_SHIFT;
725 }
/* Chipset hooks handed to the alpha AGP backend. */
struct alpha_agp_ops titan_agp_ops =
{
	.setup = titan_agp_setup,
	.cleanup = titan_agp_cleanup,
	.configure = titan_agp_configure,
	.bind = titan_agp_bind_memory,
	.unbind = titan_agp_unbind_memory,
	.translate = titan_agp_translate
};
737 alpha_agp_info *
738 titan_agp_info(void)
739 {
740 alpha_agp_info *agp;
741 struct pci_controller *hose;
742 titan_pachip_port *port;
743 int hosenum = -1;
744 union TPAchipPCTL pctl;
746 /*
747 * Find the AGP port.
748 */
749 port = &TITAN_pachip0->a_port;
750 if (titan_query_agp(port))
751 hosenum = 2;
752 if (hosenum < 0 &&
753 titan_query_agp(port = &TITAN_pachip1->a_port))
754 hosenum = 3;
756 /*
757 * Find the hose the port is on.
758 */
759 for (hose = hose_head; hose; hose = hose->next)
760 if (hose->index == hosenum)
761 break;
763 if (!hose || !hose->sg_pci)
764 return NULL;
766 /*
767 * Allocate the info structure.
768 */
769 agp = kmalloc(sizeof(*agp), GFP_KERNEL);
771 /*
772 * Fill it in.
773 */
774 agp->hose = hose;
775 agp->private = port;
776 agp->ops = &titan_agp_ops;
778 /*
779 * Aperture - not configured until ops.setup().
780 *
781 * FIXME - should we go ahead and allocate it here?
782 */
783 agp->aperture.bus_base = 0;
784 agp->aperture.size = 0;
785 agp->aperture.sysdata = NULL;
787 /*
788 * Capabilities.
789 */
790 agp->capability.lw = 0;
791 agp->capability.bits.rate = 3; /* 2x, 1x */
792 agp->capability.bits.sba = 1;
793 agp->capability.bits.rq = 7; /* 8 - 1 */
795 /*
796 * Mode.
797 */
798 pctl.pctl_q_whole = port->pctl.csr;
799 agp->mode.lw = 0;
800 agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
801 agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
802 agp->mode.bits.rq = 7; /* RQ Depth? */
803 agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;
805 return agp;
806 }