MALI: Update to wk45 r1p2-06bet0 release
author Ray Smith <Raymond.Smith@arm.com>
Tue, 20 Nov 2012 11:57:34 +0000 (11:57 +0000)
committer Gerrit <chrome-bot@google.com>
Fri, 14 Dec 2012 02:32:33 +0000 (18:32 -0800)
Change-Id: I9c50a50150c2c7de5265b72ee5adb8f4d561de35
Reviewed-on: https://gerrit.chromium.org/gerrit/38388
Reviewed-by: Gabriele Paoloni <gabriele.paoloni@arm.com>
Tested-by: Anush Elangovan <anush@chromium.org>
Reviewed-by: Anush Elangovan <anush@chromium.org>
Commit-Ready: Anush Elangovan <anush@chromium.org>

24 files changed:
drivers/gpu/arm/t6xx/kbase/Kconfig
drivers/gpu/arm/t6xx/kbase/mali_base_kernel.h
drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_lock_order.h
drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_low_level_dedicated_mem.h [deleted file]
drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_low_level_mem.h
drivers/gpu/arm/t6xx/kbase/osk/src/linux/include/osk/mali_osk_arch_low_level_mem.h
drivers/gpu/arm/t6xx/kbase/src/Kbuild
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_context.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_defs.h
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_device.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jd.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.h
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mmu.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_coarse_demand.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_driver.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_metrics.c
drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_softjobs.c
drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_core_linux.c
drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_mem_linux.c

diff --git a/drivers/gpu/arm/t6xx/kbase/Kconfig b/drivers/gpu/arm/t6xx/kbase/Kconfig
index 898f6cec0c0c33cc26eab24a6c564f9cd278a9f6..e1785208d18acb96eab81bd6087ca2871800b477 100644
@@ -28,6 +28,43 @@ config MALI_GATOR_SUPPORT
          You will need the Gator device driver already loaded before loading this driver when enabling
          Streamline debug support.
 
+config MALI_T6XX_DVFS
+       bool "Enable DVFS"
+       depends on MALI_T6XX && MACH_MANTA
+       default n
+       help
+         Choose this option to enable DVFS on MALI T6XX DDK.
+
+config MALI_T6XX_DEMAND_POLICY
+       bool "Enable demand power policy by default"
+       depends on MALI_T6XX
+       default n
+       help
+         Sets the default power policy to "demand"
+
+config MALI_T6XX_RT_PM
+       bool "Enable Runtime power management"
+       depends on MALI_T6XX
+       depends on PM_RUNTIME
+       default n
+       help
+         Choose this option to enable runtime power management on vithar DDK.
+
+config MALI_T6XX_ENABLE_TRACE
+       bool "Enable kbase tracing"
+       depends on MALI_T6XX
+       default n
+       help
+         Enables tracing in the kbase.  Trace log available through
+        the "mali_trace" debugfs file, when the CONFIG_DEBUG_FS is enabled
+
+config MALI_T6XX_DEBUG_SYS
+       bool "Enable sysfs for mali t6xx"
+       depends on MALI_T6XX && SYSFS
+       default n
+       help
+         Enables sysfs for mali t6xx device. Set/Monitor Mali T6xx Device
+
 # MALI_EXPERT configuration options
 
 menuconfig MALI_EXPERT
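
The five options added above are plain bool symbols, so each one surfaces as a CONFIG_MALI_T6XX_* define that the driver tests at compile time (the OSK_LOCK_ORDER_CMU_PMU hunk further down is one such use, guarded by CONFIG_MALI_T6XX_RT_PM). A minimal sketch of that gating pattern; example_power_up is a hypothetical helper, not part of the patch:

    #include <linux/pm_runtime.h>

    /* hypothetical helper: only issues runtime-PM calls when the option is built in */
    static int example_power_up(struct device *dev)
    {
    #ifdef CONFIG_MALI_T6XX_RT_PM
            return pm_runtime_get_sync(dev);
    #else
            (void)dev;
            return 0;
    #endif
    }
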
diff --git a/drivers/gpu/arm/t6xx/kbase/mali_base_kernel.h b/drivers/gpu/arm/t6xx/kbase/mali_base_kernel.h
index bc2dbf11353ddc22904370ec3c1da7de4882421c..8d072839ba9c89de4af9040ddf625ec72033bdee 100644
@@ -113,13 +113,14 @@ enum
        BASE_MEM_PROT_GPU_RD =      (1U << 2), /**< Read access GPU side */
        BASE_MEM_PROT_GPU_WR =      (1U << 3), /**< Write access GPU side */
        BASE_MEM_PROT_GPU_EX =      (1U << 4), /**< Execute allowed on the GPU side */
-       BASE_MEM_CACHED      =      (1U << 5), /**< Should be cached */
+       BASE_MEM_CACHED_CPU  =      (1U << 5), /**< Should be cached on the CPU */
 
        BASEP_MEM_GROWABLE   =      (1U << 6), /**< Growable memory. This is a private flag that is set automatically. Not valid for PMEM. */
        BASE_MEM_GROW_ON_GPF =      (1U << 7), /**< Grow backing store on GPU Page Fault */
 
-       BASE_MEM_COHERENT_SYSTEM =  (1U << 8),/**< Page coherence Outer shareable */
-       BASE_MEM_COHERENT_LOCAL =   (1U << 9) /**< Page coherence Inner shareable */
+       BASE_MEM_COHERENT_SYSTEM =  (1U << 8), /**< Page coherence Outer shareable */
+       BASE_MEM_COHERENT_LOCAL =   (1U << 9), /**< Page coherence Inner shareable */
+       BASE_MEM_DONT_ZERO_INIT =   (1U << 10) /**< Optimization: No need to zero initialize */
 };
 
 /**
@@ -159,7 +160,7 @@ enum
  *
  * Must be kept in sync with the ::base_mem_alloc_flags flags
  */
-#define BASE_MEM_FLAGS_NR_BITS  10
+#define BASE_MEM_FLAGS_NR_BITS  11
 
 /**
  * @brief Result codes of changing the size of the backing store allocated to a tmem region
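
The enum change above splits the old BASE_MEM_CACHED bit into a CPU-only cacheability flag and adds BASE_MEM_DONT_ZERO_INIT at bit 10, which is why BASE_MEM_FLAGS_NR_BITS grows from 10 to 11. A small sketch of how a caller might compose and bounds-check the widened flag word; the mask macro and helper are illustrative, not part of the header:

    /* illustrative: every defined flag now fits below BASE_MEM_FLAGS_NR_BITS (11) */
    #define EXAMPLE_MEM_FLAGS_MASK  ((1U << BASE_MEM_FLAGS_NR_BITS) - 1)

    static int example_flags_valid(u32 flags)
    {
            /* reject anything outside the documented flag range */
            return (flags & ~EXAMPLE_MEM_FLAGS_MASK) == 0;
    }

    /* GPU-readable, CPU-cached allocation that skips zero-initialisation */
    u32 example_flags = BASE_MEM_PROT_GPU_RD | BASE_MEM_CACHED_CPU | BASE_MEM_DONT_ZERO_INIT;
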
diff --git a/drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_lock_order.h b/drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_lock_order.h
index dfe40d97da36820cf006a9c96588d90842700f75..87ac1bb313333cbadbd904429223c2c652fd8785 100644
@@ -225,6 +225,13 @@ typedef enum
         */
        OSK_LOCK_ORDER_BASE_REG_QUEUE,
 
+#ifdef CONFIG_MALI_T6XX_RT_PM
+       /**
+        * System power for mali-t604
+        */
+       OSK_LOCK_ORDER_CMU_PMU,
+#endif
+
        /**
         * Reserved mutex order, indicating that the mutex will be the first to be
         * locked, and all other OSK mutexes are obtained after this one.
diff --git a/drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_low_level_dedicated_mem.h b/drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_low_level_dedicated_mem.h
deleted file mode 100644
index c9f3ac6..0000000
--- a/drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_low_level_dedicated_mem.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2008-2012 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- *
- */
-
-
-
-/**
- * @file
- * Implementation of the dedicated memory allocator for the kernel device driver
- */
-
-#ifndef _OSK_LOW_LEVEL_DEDICATED_MEM_H_
-#define _OSK_LOW_LEVEL_DEDICATED_MEM_H_
-
-#include <linux/io.h>
-
-struct oskp_phy_dedicated_allocator
-{
-       /* lock to protect the free map management */
-       osk_mutex        lock;
-       
-       osk_phy_addr     base;
-       u32              num_pages;
-       u32              free_pages;
-
-       unsigned long *  free_map;
-};
-
-OSK_STATIC_INLINE osk_error oskp_phy_dedicated_allocator_init(oskp_phy_dedicated_allocator * const allocator,
-                                                              osk_phy_addr mem, u32 nr_pages, const char* name)
-{
-       osk_error error;
-
-       OSK_ASSERT(allocator);
-       OSK_ASSERT(nr_pages > 0);
-       /* Assert if not page aligned */
-       OSK_ASSERT( 0 == (mem & (OSK_PAGE_SIZE-1)) );
-
-       if (!mem)
-       {
-               /* no address to manage specified */
-               return OSK_ERR_FAIL;
-       }
-       else
-       {
-               u32 i;
-
-               /* try to obtain dedicated memory */
-               if(oskp_phy_dedicated_allocator_request_memory(mem, nr_pages, name) != OSK_ERR_NONE)
-               {
-                       /* requested memory not available */
-                       return OSK_ERR_FAIL;
-               }
-
-               allocator->base = mem;
-               allocator->num_pages  = nr_pages;
-               allocator->free_pages = allocator->num_pages;
-
-               error = osk_mutex_init(&allocator->lock, OSK_LOCK_ORDER_LAST );
-               if (OSK_ERR_NONE != error)
-               {
-                       return OSK_ERR_FAIL;
-               }
-
-               allocator->free_map = osk_calloc(sizeof(unsigned long) * ((nr_pages + OSK_BITS_PER_LONG - 1) / OSK_BITS_PER_LONG));
-               if (NULL == allocator->free_map)
-               {
-                       osk_mutex_term(&allocator->lock);
-                       return OSK_ERR_ALLOC;
-               }
-
-               /* correct for nr_pages not being a multiple of OSK_BITS_PER_LONG */
-               for (i = nr_pages; i < ((nr_pages + OSK_BITS_PER_LONG - 1) & ~(OSK_BITS_PER_LONG-1)); i++)
-               {
-                       osk_bitarray_set_bit(i, allocator->free_map);
-               }
-
-               return OSK_ERR_NONE;
-       }
-}
-
-OSK_STATIC_INLINE void oskp_phy_dedicated_allocator_term(oskp_phy_dedicated_allocator *allocator)
-{
-       OSK_ASSERT(allocator);
-       OSK_ASSERT(allocator->free_map);
-       oskp_phy_dedicated_allocator_release_memory(allocator->base, allocator->num_pages);
-       osk_free(allocator->free_map);
-       osk_mutex_term(&allocator->lock);
-}
-
-OSK_STATIC_INLINE u32 oskp_phy_dedicated_pages_alloc(oskp_phy_dedicated_allocator *allocator,
-                                                           u32 nr_pages, osk_phy_addr *pages)
-{
-       u32 pages_allocated;
-
-       OSK_ASSERT(pages);
-       OSK_ASSERT(allocator);
-       OSK_ASSERT(allocator->free_map);
-
-       osk_mutex_lock(&allocator->lock);
-
-       for (pages_allocated = 0; pages_allocated < OSK_MIN(nr_pages, allocator->free_pages); pages_allocated++)
-       {
-               u32 pfn;
-               void * mapping;
-
-               pfn = osk_bitarray_find_first_zero_bit(allocator->free_map, allocator->num_pages);
-               /* As the free_pages test passed ffz should never fail */
-               OSK_ASSERT(pfn != allocator->num_pages);
-
-               /* mark as allocated */
-               osk_bitarray_set_bit(pfn, allocator->free_map);
-
-               /* find phys addr of the page */
-               pages[pages_allocated] = allocator->base + (pfn << OSK_PAGE_SHIFT);
-
-               /* zero the page */
-               if(OSK_SIMULATE_FAILURE(OSK_OSK))
-               {
-                       mapping = NULL;
-               }
-               else
-               {
-                       mapping = ioremap_wc(pages[pages_allocated], SZ_4K);
-               }
-
-               if (NULL == mapping)
-               {
-                       /* roll back */
-                       for (pages_allocated++; pages_allocated > 0; pages_allocated--)
-                       {
-                               pfn = (pages[pages_allocated-1] - allocator->base) >> OSK_PAGE_SHIFT;
-                               osk_bitarray_clear_bit(pfn, allocator->free_map);
-                       }
-                       break;
-               }
-
-               OSK_MEMSET(mapping, 0x00, OSK_PAGE_SIZE);
-
-               osk_sync_to_memory(pages[pages_allocated], mapping, OSK_PAGE_SIZE);
-               iounmap(mapping);
-       }
-
-       allocator->free_pages -= pages_allocated;
-       osk_mutex_unlock(&allocator->lock);
-
-       return pages_allocated;
-}
-
-OSK_STATIC_INLINE void oskp_phy_dedicated_pages_free(oskp_phy_dedicated_allocator *allocator,
-                                                     u32 nr_pages, osk_phy_addr *pages)
-{
-       u32 i;
-
-       OSK_ASSERT(pages);
-       OSK_ASSERT(allocator);
-       OSK_ASSERT(allocator->free_map);
-
-       osk_mutex_lock(&allocator->lock);
-
-       for (i = 0; i < nr_pages; i++)
-       {
-               if (0 != pages[i])
-               {
-                       u32 pfn;
-
-                       OSK_ASSERT(pages[i] >= allocator->base);
-                       OSK_ASSERT(pages[i] < allocator->base + (allocator->num_pages << OSK_PAGE_SHIFT));
-               
-                       pfn = (pages[i] - allocator->base) >> OSK_PAGE_SHIFT;
-                       osk_bitarray_clear_bit(pfn, allocator->free_map);
-
-                       allocator->free_pages++;
-
-                       pages[i] = 0;
-               }
-       }
-
-       osk_mutex_unlock(&allocator->lock);
-}
-
-#endif /* _OSK_LOW_LEVEL_DEDICATED_MEM_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_low_level_mem.h b/drivers/gpu/arm/t6xx/kbase/osk/include/mali_osk_low_level_mem.h
index 5be72080b5b6955f5789dacebaea26edbb08a69a..0a76c82897b1b7850eec78de228ae7f3c35132a8 100644
@@ -54,148 +54,6 @@ extern "C"
  */
 typedef void *osk_virt_addr;
 
-/**
- * Physical page allocator
- */
-typedef struct osk_phy_allocator osk_phy_allocator;
-
-/**
- * Dedicated physical page allocator
- */
-typedef struct oskp_phy_os_allocator oskp_phy_os_allocator;
-/**
- * OS physical page allocator
- */
-typedef struct oskp_phy_dedicated_allocator oskp_phy_dedicated_allocator;
-
-/**
- * @brief Initialize a physical page allocator
- *
- * The physical page allocator is responsible for allocating physical memory pages of
- * OSK_PAGE_SIZE bytes each. Pages are allocated through the OS or from a reserved
- * memory region.
- *
- * Physical page allocation through the OS
- *
- * If \a mem is 0, upto \a nr_pages of pages may be allocated through the OS for use
- * by a user process. OSs that require allocating CPU virtual address space in order
- * to allocate physical pages must observe that the CPU virtual address space is
- * allocated for the current user process and that the physical allocator must always
- * be used with this same user process.
- *
- * If \a mem is 0, and \a nr_pages is 0, a variable number of pages may be allocated
- * through the OS for use by the kernel (only limited by the available OS memory).
- * Allocated pages may be mapped into the kernel using osk_kmap(). The use case for
- * this type of physical allocator is the allocation of physical pages for MMU page
- * tables. OSs that require allocating CPU virtual address space in order
- * to allocate physical pages must likely manage a list of fixed size virtual
- * address regions against which pages are committed as more pages are allocated.
- *
- * Physical page allocation from a reserved memory region
- *
- * If \a mem is not 0, \a mem specifies the physical start address of a physically
- * contiguous memory region, from which \a nr_pages of pages may be allocated, for
- * use by a user process. The start address is aligned to OSK_PAGE_SIZE bytes.
- * The memory region must not be in use by the OS and solely for use by the physical
- * allocator. OSs that require allocating CPU virtual address space in order
- * to allocate physical pages must observe that the CPU virtual address space is
- * allocated for the current user process and that the physical allocator must always
- * be used with this same user process.
- *
- * @param[out] allocator physical allocator to initialize
- * @param[in] mem        Set \a mem to 0 if physical pages should be allocated through the OS,
- *                       otherwise \a mem represents the physical address of a reserved
- *                       memory region from which pages should be allocated. The physical
- *                       address must be OSK_PAGE_SIZE aligned.
- * @param[in] nr_pages   maximum number of physical pages that can be allocated.
- *                       If nr_pages > 0, pages are for use in user space.
- *                       If nr_pages is 0, a variable number number of pages can be allocated
- *                       (limited by the available pages from the OS) but the pages are
- *                       for use by the kernel and \a mem must be set to 0
- *                       (to enable allocating physical pages through the OS).
- * @param[in] name              name of the reserved memory region
- * @return OSK_ERR_NONE if successful. Any other value indicates failure.
- */
-OSK_STATIC_INLINE osk_error osk_phy_allocator_init(osk_phy_allocator * const allocator, osk_phy_addr mem, u32 nr_pages, const char* name) CHECK_RESULT;
-
-OSK_STATIC_INLINE osk_error oskp_phy_os_allocator_init(oskp_phy_os_allocator * const allocator,
-                                                       osk_phy_addr mem, u32 nr_pages) CHECK_RESULT;
-OSK_STATIC_INLINE osk_error oskp_phy_dedicated_allocator_init(oskp_phy_dedicated_allocator * const allocator,
-                                                              osk_phy_addr mem, u32 nr_pages, const char* name) CHECK_RESULT;
-OSK_STATIC_INLINE osk_error oskp_phy_dedicated_allocator_request_memory(osk_phy_addr mem,u32 nr_pages, const char* name) CHECK_RESULT;
-
-
-/**
- * @brief Terminate a physical page allocator
- *
- * Frees any resources necessary to manage the physical allocator. Any physical pages that
- * were allocated or mapped by the allocator must have been freed and unmapped earlier.
- *
- * Allocating and mapping pages using the terminated allocator is prohibited until the
- * the \a allocator is reinitailized with osk_phy_allocator_init().
- *
- * @param[in] allocator initialized physical allocator
- */
-OSK_STATIC_INLINE void osk_phy_allocator_term(osk_phy_allocator *allocator);
-
-OSK_STATIC_INLINE void oskp_phy_os_allocator_term(oskp_phy_os_allocator *allocator);
-OSK_STATIC_INLINE void oskp_phy_dedicated_allocator_term(oskp_phy_dedicated_allocator *allocator);
-OSK_STATIC_INLINE void oskp_phy_dedicated_allocator_release_memory(osk_phy_addr mem,u32 nr_pages);
-
-/**
- * @brief Allocate physical pages
- *
- * Allocates \a nr_pages physical pages of OSK_PAGE_SIZE each using the physical
- * allocator \a allocator and stores the physical address of each allocated page
- * in the \a pages array.
- *
- * If the physical allocator was initialized to allocate pages for use by a user
- * process, the pages need to be allocated in the same user space context as the
- * physical allocator was initialized in.
- *
- * This function may block and cannot be used from ISR context.
- *
- * @param[in] allocator initialized physical allocator
- * @param[in] nr_pages  number of physical pages to allocate
- * @param[out] pages    array of \a nr_pages elements storing the physical
- *                      address of an allocated page
- * @return The number of pages successfully allocated,
- * which might be lower than requested, including zero pages.
- */
-OSK_STATIC_INLINE u32 osk_phy_pages_alloc(osk_phy_allocator *allocator, u32 nr_pages, osk_phy_addr *pages) CHECK_RESULT;
-
-OSK_STATIC_INLINE u32 oskp_phy_os_pages_alloc(oskp_phy_os_allocator *allocator,
-                                                    u32 nr_pages, osk_phy_addr *pages) CHECK_RESULT;
-OSK_STATIC_INLINE u32 oskp_phy_dedicated_pages_alloc(oskp_phy_dedicated_allocator *allocator,
-                                                           u32 nr_pages, osk_phy_addr *pages) CHECK_RESULT;
-
-/**
- * @brief Free physical pages
- *
- * Frees physical pages previously allocated by osk_phy_pages_alloc(). The same
- * arguments used for the allocation need to be specified when freeing them.
- *
- * Freeing individual pages of a set of pages allocated by osk_phy_pages_alloc()
- * is not allowed.
- *
- * If the physical allocator was initialized to allocate pages for use by a user
- * process, the pages need to be freed in the same user space context as the
- * physical allocator was initialized in.
- *
- * The contents of the \a pages array is undefined after osk_phy_pages_free has
- * freed the pages.
- *
- * @param[in] allocator initialized physical allocator
- * @param[in] nr_pages  number of physical pages to free (as used during the allocation)
- * @param[in] pages     array of \a nr_pages storing the physical address of an
- *                      allocated page (as used during the allocation).
- */
-OSK_STATIC_INLINE void osk_phy_pages_free(osk_phy_allocator *allocator, u32 nr_pages, osk_phy_addr *pages);
-
-OSK_STATIC_INLINE void oskp_phy_os_pages_free(oskp_phy_os_allocator *allocator,
-                                              u32 nr_pages, osk_phy_addr *pages);
-OSK_STATIC_INLINE void oskp_phy_dedicated_pages_free(oskp_phy_dedicated_allocator *allocator,
-                                                     u32 nr_pages, osk_phy_addr *pages);
 /**
  * @brief Map a physical page into the kernel
  *
@@ -331,85 +189,8 @@ OSK_STATIC_INLINE void osk_sync_to_cpu(osk_phy_addr paddr, osk_virt_addr vaddr,
 /** @} */ /* end group base_api */
 
 /* pull in the arch header with the implementation  */
-#include "mali_osk_low_level_dedicated_mem.h"
 #include <osk/mali_osk_arch_low_level_mem.h>
 
-typedef enum oskp_phy_allocator_type
-{
-       OSKP_PHY_ALLOCATOR_OS,
-       OSKP_PHY_ALLOCATOR_DEDICATED
-} oskp_phy_allocator_type;
-
-struct osk_phy_allocator
-{
-       oskp_phy_allocator_type type;
-       union {
-               struct oskp_phy_dedicated_allocator dedicated;
-               struct oskp_phy_os_allocator        os;
-       } data;
-};
-
-
-OSK_STATIC_INLINE osk_error osk_phy_allocator_init(osk_phy_allocator * const allocator, osk_phy_addr mem, u32 nr_pages, const char* name)
-{
-       OSK_ASSERT(allocator);
-       if (mem == 0)
-       {
-               allocator->type = OSKP_PHY_ALLOCATOR_OS;
-               return oskp_phy_os_allocator_init(&allocator->data.os, mem, nr_pages);
-       }
-       else
-       {
-               allocator->type = OSKP_PHY_ALLOCATOR_DEDICATED;
-               return oskp_phy_dedicated_allocator_init(&allocator->data.dedicated, mem, nr_pages, name);
-       }
-}
-
-OSK_STATIC_INLINE void osk_phy_allocator_term(osk_phy_allocator *allocator)
-{
-       OSK_ASSERT(allocator);
-       if (allocator->type == OSKP_PHY_ALLOCATOR_OS)
-       {
-               oskp_phy_os_allocator_term(&allocator->data.os);
-       }
-       else
-       {
-               oskp_phy_dedicated_allocator_term(&allocator->data.dedicated);
-       }
-}
-
-OSK_STATIC_INLINE u32 osk_phy_pages_alloc(osk_phy_allocator *allocator, u32 nr_pages, osk_phy_addr *pages)
-{
-       OSK_ASSERT(allocator);
-       OSK_ASSERT(pages);
-       if (allocator->type != OSKP_PHY_ALLOCATOR_OS && allocator->type != OSKP_PHY_ALLOCATOR_DEDICATED)
-       {
-               return 0;
-       }
-       if (allocator->type == OSKP_PHY_ALLOCATOR_OS)
-       {
-               return oskp_phy_os_pages_alloc(&allocator->data.os, nr_pages, pages);
-       }
-       else
-       {
-               return oskp_phy_dedicated_pages_alloc(&allocator->data.dedicated, nr_pages, pages);
-       }
-}
-
-OSK_STATIC_INLINE void osk_phy_pages_free(osk_phy_allocator *allocator, u32 nr_pages, osk_phy_addr *pages)
-{
-       OSK_ASSERT(allocator);
-       OSK_ASSERT(pages);
-       if (allocator->type == OSKP_PHY_ALLOCATOR_OS)
-       {
-               oskp_phy_os_pages_free(&allocator->data.os, nr_pages, pages);
-       }
-       else
-       {
-               oskp_phy_dedicated_pages_free(&allocator->data.dedicated, nr_pages, pages);
-       }
-}
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/gpu/arm/t6xx/kbase/osk/src/linux/include/osk/mali_osk_arch_low_level_mem.h b/drivers/gpu/arm/t6xx/kbase/osk/src/linux/include/osk/mali_osk_arch_low_level_mem.h
index dfe41f265e2725575b869f7d5c92be0512052f2e..688d2240b2b81498c93b3d1d3b234191f31141b0 100644
 #include <linux/highmem.h>
 #include <linux/dma-mapping.h>
 
-extern atomic_t mali_memory_pages;
-
-struct oskp_phy_os_allocator
-{
-};
-
-OSK_STATIC_INLINE osk_error oskp_phy_os_allocator_init(oskp_phy_os_allocator * const allocator,
-                                                       osk_phy_addr mem, u32 nr_pages)
-{
-       OSK_ASSERT(NULL != allocator);
-
-       return OSK_ERR_NONE;
-}
-
-OSK_STATIC_INLINE void oskp_phy_os_allocator_term(oskp_phy_os_allocator *allocator)
-{
-       OSK_ASSERT(NULL != allocator);
-       /* Nothing needed */
-}
-
-OSK_STATIC_INLINE u32 oskp_phy_os_pages_alloc(oskp_phy_os_allocator *allocator,
-                                                    u32 nr_pages, osk_phy_addr *pages)
-{
-       int i;
-
-       OSK_ASSERT(NULL != allocator);
-
-       if(OSK_SIMULATE_FAILURE(OSK_OSK))
-       {
-               return 0;
-       }
-
-       for (i = 0; i < nr_pages; i++)
-       {
-               struct page *p;
-               void * mp;
-
-#ifdef CONFIG_MALI_BASE_ALLOC_FAIL
-               p = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | __GFP_NOMEMALLOC);
-#else
-               p = alloc_page(GFP_HIGHUSER);
-#endif
-
-               if (NULL == p)
-               {
-                       break;
-               }
-
-               mp = kmap(p);
-               if (NULL == mp)
-               {
-                       __free_page(p);
-                       break;
-               }
-
-               memset(mp, 0x00, PAGE_SIZE); /* instead of __GFP_ZERO, so we can do cache maintenance */
-               osk_sync_to_memory(PFN_PHYS(page_to_pfn(p)), mp, PAGE_SIZE);
-               kunmap(p);
-
-               pages[i] = PFN_PHYS(page_to_pfn(p));
-       }
-
-       atomic_add(i, &mali_memory_pages);
-
-       return i;
-}
-
-static inline void oskp_phy_os_pages_free(oskp_phy_os_allocator *allocator,
-                                          u32 nr_pages, osk_phy_addr *pages)
-{
-       int i;
-
-       OSK_ASSERT(NULL != allocator);
-
-       atomic_sub(nr_pages, &mali_memory_pages);
-
-       for (i = 0; i < nr_pages; i++)
-       {
-               if (0 != pages[i])
-               {
-                       __free_page(pfn_to_page(PFN_DOWN(pages[i])));
-                       pages[i] = (osk_phy_addr)0;
-               }
-       }
-}
-
-
 OSK_STATIC_INLINE osk_error oskp_phy_dedicated_allocator_request_memory(osk_phy_addr mem,u32 nr_pages, const char* name)
 {
        if(OSK_SIMULATE_FAILURE(OSK_OSK))
diff --git a/drivers/gpu/arm/t6xx/kbase/src/Kbuild b/drivers/gpu/arm/t6xx/kbase/src/Kbuild
index cf5a96d21692943758752098e51cd6f790cb8228..9c62c69e271d6c6ee9a741f1f58c74eca58859ee 100644
@@ -11,7 +11,7 @@
 
 
 # Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r1p2-04dev0"
+MALI_RELEASE_NAME ?= "r1p2-06bet0"
 
 # Paths required for build
 KBASE_PATH = $(src)/../..
@@ -165,6 +165,10 @@ endif
 
 # Tell the Linux build system from which .o file to create the kernel module
 obj-$(CONFIG_MALI_T6XX) += mali_kbase.o
+ifeq ($(CONFIG_MACH_MANTA),y)
+obj-$(CONFIG_MALI_T6XX) += platform/mali_kbase_platform.o
+obj-$(CONFIG_MALI_T6XX) += platform/mali_kbase_dvfs.o
+endif
 
 # Tell the Linux build system to enable building of our .c files
 mali_kbase-y := $(SRC:.c=.o)
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.c
index 9e75a14e5a1eaa5e6d0e5216a7ca903d77829b48..c5e15368b60be0b624c40509a16ec17bed77baa3 100644
@@ -231,7 +231,6 @@ mali_error kbasep_8401_workaround_init(kbase_device *kbdev)
 {
        kbasep_js_device_data *js_devdata;
        kbase_context *workaround_kctx;
-       u32 count;
        int i;
        u16 as_present_mask;
 
@@ -263,13 +262,9 @@ mali_error kbasep_8401_workaround_init(kbase_device *kbdev)
        }
 
        /* Allocate the pages required to contain the job */
-       count = kbase_phy_pages_alloc(workaround_kctx->kbdev,
-                                     &workaround_kctx->pgd_allocator,
-                                     KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT,
-                                     kbdev->workaround_compute_job_pa);
-       if(count < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT)
+       if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(&workaround_kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa, KBASE_REG_MUST_ZERO))
        {
-               goto page_release;
+               goto no_pages;
        }
 
        /* Get virtual address of mapped memory and write a compute job for each page */
@@ -303,8 +298,8 @@ page_free:
        {
                osk_kunmap(kbdev->workaround_compute_job_pa[i], kbdev->workaround_compute_job_va[i]);
        }
-page_release:
-       kbase_phy_pages_free(kbdev, &workaround_kctx->pgd_allocator, count, kbdev->workaround_compute_job_pa);
+       kbase_mem_allocator_free(&workaround_kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa);
+no_pages:
        kbase_destroy_context(workaround_kctx);
 
        return MALI_ERROR_FUNCTION_FAILED;
@@ -330,7 +325,7 @@ void kbasep_8401_workaround_term(kbase_device *kbdev)
                osk_kunmap(kbdev->workaround_compute_job_pa[i], kbdev->workaround_compute_job_va[i]);
        }
 
-       kbase_phy_pages_free(kbdev, &kbdev->workaround_kctx->pgd_allocator, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa);
+       kbase_mem_allocator_free(&kbdev->workaround_kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa);
 
        kbase_destroy_context(kbdev->workaround_kctx);
        kbdev->workaround_kctx = NULL;
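
With the per-context allocator, the workaround path above no longer tracks a partial page count: kbase_mem_allocator_alloc either delivers all of the requested pages (zeroed, because of KBASE_REG_MUST_ZERO) or fails outright, and the same count is handed back on free. A condensed sketch of that pairing, assuming a context kctx whose osalloc has been initialised:

    osk_phy_addr pa[KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT];

    /* all-or-nothing: there is no partial allocation to roll back */
    if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(&kctx->osalloc,
                    KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, pa, KBASE_REG_MUST_ZERO))
    {
            return MALI_ERROR_FUNCTION_FAILED;
    }

    /* ... map and use the pages ... */

    kbase_mem_allocator_free(&kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, pa);
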
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.c
index 7340d1751dc97a97a654cdb3bf28eba6781bec02..df6e8b751ea06db1f305f3670dd4fd8c626fc2c4 100644
@@ -22,7 +22,6 @@
 /*
  * The output flags should be a combination of the following values:
  * KBASE_REG_CPU_CACHED: CPU cache should be enabled
- * KBASE_REG_GPU_CACHED: GPU cache should be enabled
  */
 u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
 {
@@ -30,9 +29,9 @@ u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
 
        CSTD_UNUSED(nr_pages);
 
-       if (flags & BASE_MEM_CACHED)
+       if (flags & BASE_MEM_CACHED_CPU)
        {
-               cache_flags |= KBASE_REG_CPU_CACHED | KBASE_REG_GPU_CACHED;
+               cache_flags |= KBASE_REG_CPU_CACHED;
        }
 
        return cache_flags;
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_context.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_context.c
index a53b2df2de6f4aa72fd5118d1236fdf625471191..5b6f2203a1a9ae380b2aeb1dbb0ff06498aa41da 100644
@@ -20,6 +20,7 @@
 #include <kbase/src/common/mali_kbase.h>
 #include <kbase/src/common/mali_midg_regmap.h>
 
+#define MEMPOOL_PAGES 16384
 /**
  * @brief Create a kernel base context.
  *
@@ -29,7 +30,6 @@
 kbase_context *kbase_create_context(kbase_device *kbdev)
 {
        kbase_context *kctx;
-       osk_error osk_err;
        mali_error mali_err;
 
        OSK_ASSERT(kbdev != NULL);
@@ -41,7 +41,7 @@ kbase_context *kbase_create_context(kbase_device *kbdev)
        }
        else
        {
-               kctx = kzalloc(sizeof(*kctx), GFP_KERNEL);
+               kctx = vzalloc(sizeof(*kctx));
        }
 
        if (!kctx)
@@ -57,11 +57,18 @@ kbase_context *kbase_create_context(kbase_device *kbdev)
        kctx->process_mm = NULL;
        atomic_set(&kctx->nonmapped_pages, 0);
 
-       if (kbase_mem_usage_init(&kctx->usage, kctx->kbdev->memdev.per_process_memory_limit >> PAGE_SHIFT))
+       if (MALI_ERROR_NONE != kbase_mem_allocator_init(&kctx->osalloc, MEMPOOL_PAGES))
        {
                goto free_kctx;
        }
 
+       kctx->pgd_allocator = &kctx->osalloc;
+
+       if (kbase_mem_usage_init(&kctx->usage, kctx->kbdev->memdev.per_process_memory_limit >> PAGE_SHIFT))
+       {
+               goto free_allocator;
+       }
+
        if (kbase_jd_init(kctx))
                goto free_memctx;
 
@@ -83,15 +90,9 @@ kbase_context *kbase_create_context(kbase_device *kbdev)
        OSK_DLIST_INIT(&kctx->waiting_kds_resource);
 #endif
 
-       /* Use a new *Shared Memory* allocator for GPU page tables.
-        * See MIDBASE-1534 for details. */
-       osk_err = osk_phy_allocator_init(&kctx->pgd_allocator, 0, 0, NULL);
-       if (OSK_ERR_NONE != osk_err)
-               goto free_event;
-
        mali_err = kbase_mmu_init(kctx);
        if(MALI_ERROR_NONE != mali_err)
-               goto free_phy;
+               goto free_event;
 
        kctx->pgd = kbase_mmu_alloc_pgd(kctx);
        if (!kctx->pgd)
@@ -112,8 +113,6 @@ free_pgd:
        kbase_mmu_free_pgd(kctx);
 free_mmu:
        kbase_mmu_term(kctx);
-free_phy:
-       osk_phy_allocator_term(&kctx->pgd_allocator);
 free_event:
        kbase_event_cleanup(kctx);
 free_jd:
@@ -122,8 +121,10 @@ free_jd:
        kbase_jd_exit(kctx);
 free_memctx:
        kbase_mem_usage_term(&kctx->usage);
+free_allocator:
+       kbase_mem_allocator_term(&kctx->osalloc);
 free_kctx:
-       kfree(kctx);
+       vfree(kctx);
 out:
        return NULL;
        
@@ -167,7 +168,6 @@ void kbase_destroy_context(kbase_context *kctx)
 
        /* MMU is disabled as part of scheduling out the context */
        kbase_mmu_free_pgd(kctx);
-       osk_phy_allocator_term(&kctx->pgd_allocator);
        kbase_region_tracker_term(kctx);
        kbase_destroy_os_context(&kctx->osctx);
        kbase_gpu_vm_unlock(kctx);
@@ -188,7 +188,9 @@ void kbase_destroy_context(kbase_context *kctx)
                kbase_pm_context_idle(kbdev);
        }
        WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
-       kfree(kctx);
+
+       kbase_mem_allocator_term(&kctx->osalloc);
+       vfree(kctx);
 }
 KBASE_EXPORT_SYMBOL(kbase_destroy_context)
 
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_defs.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_defs.h
index cffa853a8fcfaa3dfb5b00e67c8d73f057cfbd97..591a368454446450ffb793f57f94102c064ea769 100644
@@ -30,6 +30,8 @@
 
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
 
 #ifdef CONFIG_KDS
 #include <linux/kds.h>
@@ -365,33 +367,6 @@ typedef struct kbasep_mem_usage
  * Enumeration lists different orders in which physical allocators are selected on memory allocation.
  *
  */
-typedef enum kbase_phys_allocator_order
-{
-       ALLOCATOR_ORDER_CONFIG,                 /* Select allocators in order they appeared in the configuration file */
-       ALLOCATOR_ORDER_GPU_PERFORMANCE,        /* Select allocators in order from fastest to slowest on the GPU */
-       ALLOCATOR_ORDER_CPU_PERFORMANCE,        /* Select allocators in order from fastest to slowest on the CPU */
-       ALLOCATOR_ORDER_CPU_GPU_PERFORMANCE,    /* Select allocators in order from fastest to slowest on the CPU and GPU */
-
-       ALLOCATOR_ORDER_COUNT
-} kbase_phys_allocator_order;
-
-
-/* A simple structure to keep a sorted list of
- * osk_phy_allocator pointers.
- * Used by the iterator object
- */
-typedef struct kbase_phys_allocator_array
-{
-       /* the allocators */
-       osk_phy_allocator * allocs;
-       osk_phy_allocator ** sorted_allocs[ALLOCATOR_ORDER_COUNT];
-       /* number of allocators */
-       unsigned int count;
-
-#ifdef CONFIG_MALI_DEBUG
-       mali_bool it_bound;
-#endif /* CONFIG_MALI_DEBUG */
-} kbase_phys_allocator_array;
 
 /**
  * Instrumentation State Machine States
@@ -432,9 +407,19 @@ typedef struct kbasep_mem_device
                                                                Read-only, copied from platform configuration on startup. */
        kbasep_mem_usage           usage;                    /* Tracks usage of OS shared memory. Initialized with platform
                                                                configuration data, updated when OS memory is allocated/freed.*/
-       kbase_phys_allocator_array allocators;               /* List of available physical memory allocators */
 } kbasep_mem_device;
 
+/* raw page handling */
+typedef struct kbase_mem_allocator
+{
+       atomic_t            free_list_size;
+       unsigned int        free_list_max_size;
+       struct mutex        free_list_lock;
+       struct list_head    free_list_head;
+       struct shrinker     free_list_reclaimer;
+       struct kmem_cache * free_list_highmem_slab;
+       mempool_t         * free_list_highmem_pool;
+} kbase_mem_allocator;
 
 #define KBASE_TRACE_CODE( X ) KBASE_TRACE_CODE_ ## X
 
@@ -480,8 +465,6 @@ struct kbase_device {
 
        kbase_as                as[BASE_MAX_NR_AS];
 
-       osk_phy_allocator       mmu_fault_allocator;
-       osk_phy_addr            mmu_fault_pages[4];
        spinlock_t        mmu_mask_change;
 
        kbase_gpu_props         gpu_props;
@@ -610,7 +593,6 @@ struct kbase_device {
 struct kbase_context
 {
        kbase_device            *kbdev;
-       osk_phy_allocator       pgd_allocator;
        osk_phy_addr            pgd;
        osk_dlist               event_list;
        struct mutex            event_mutex;
@@ -632,6 +614,8 @@ struct kbase_context
        kbasep_mem_usage        usage;
        atomic_t                nonmapped_pages;
        ukk_session             ukk_session;
+       kbase_mem_allocator     osalloc;
+       kbase_mem_allocator *   pgd_allocator;
 
        osk_dlist               waiting_soft_jobs;
 
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_device.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_device.c
index 6d7b63cbbd9681e1a667b84f86c8a8f0823d579e..b56cfb15ca545b63b8c2ae627225f652112bd74a 100644
@@ -123,14 +123,12 @@ mali_error kbase_device_init(kbase_device *kbdev)
 
                kbdev->as[i].number = i;
                kbdev->as[i].fault_addr = 0ULL;
-
                /* Simulate failure to create the workqueue */
                if(OSK_SIMULATE_FAILURE(OSK_BASE_CORE))
                {
                        kbdev->as[i].pf_wq = NULL;
                        goto free_workqs;
                }
-
                kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
                if (NULL == kbdev->as[i].pf_wq)
                {
@@ -138,7 +136,6 @@ mali_error kbase_device_init(kbase_device *kbdev)
                }
 
                mutex_init(&kbdev->as[i].transaction_mutex);
-
                if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
                {
                        struct hrtimer * poking_timer = &kbdev->as[i].poke_timer;
@@ -150,7 +147,6 @@ mali_error kbase_device_init(kbase_device *kbdev)
                                destroy_workqueue(kbdev->as[i].pf_wq);
                                goto free_workqs;
                        }
-
                        kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
                        if (NULL == kbdev->as[i].poke_wq)
                        {
@@ -344,6 +340,7 @@ void kbase_device_trace_register_access(kbase_context * kctx, kbase_reg_access_t
                /* store the trace entry at the selected offset */
                tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
                tb[write_offset * 2 + 1] = reg_value;
+
                mb();
 
                /* new header word */
@@ -571,6 +568,7 @@ void kbasep_trace_dump(kbase_device *kbdev)
        u32 start;
        u32 end;
 
+
        OSK_PRINT( OSK_BASE_CORE, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
        spin_lock_irqsave( &kbdev->trace_lock, flags);
        start = kbdev->trace_first_out;
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.c
index fca089e8decacc76b75a60b63d920ed2bcaac0dd..7b6965c19ad35cdb2d75a9d82ed184d91fecb2d3 100644
@@ -70,7 +70,7 @@ mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops *
                                                   gpu_speed_khz,
                                                   kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min,
                                                   kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
-                       }       
+                       }
                }
 #endif /* CONFIG_MALI_DEBUG */
        }
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jd.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jd.c
index f7ddee8c4a822f2469e6cface1f290228d793543..a3ab7a3d2404254c1bd29ee7b589065bfa0a6e28 100644
@@ -91,6 +91,7 @@ static int jd_run_atom(kbase_jd_atom *katom)
 
 #ifdef CONFIG_KDS
 
+
 /* Add the katom to the kds waiting list.
  * Atoms must be added to the waiting list after a successful call to kds_async_waitall.
  * The caller must hold the kbase_jd_context.lock */
@@ -118,6 +119,7 @@ static void kbase_jd_kds_waiters_remove(kbase_jd_atom *katom)
        OSK_DLIST_REMOVE(&kctx->waiting_kds_resource, katom, kds_wait_item );
 }
 
+
 static void kds_dep_clear(void * callback_parameter, void * callback_extra_parameter)
 {
        kbase_jd_atom * katom;
@@ -178,6 +180,7 @@ void kbase_cancel_kds_wait_job(kbase_jd_atom *katom)
                }
        }
 }
+
 #endif /* CONFIG_KDS */
 
 #ifdef CONFIG_DMA_SHARED_BUFFER
@@ -252,7 +255,7 @@ void kbase_jd_free_external_resources(kbase_jd_atom *katom)
                mutex_unlock(&jctx->lock);
 
                /* Release the kds resource or cancel if zapping */
-               kds_resource_set_release_sync(&katom->kds_rset);
+               kds_resource_set_release(&katom->kds_rset);
        }
 #endif /* CONFIG_KDS */
 }
@@ -307,6 +310,7 @@ static void kbase_jd_post_external_resources(kbase_jd_atom * katom)
 #endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
 }
 
+#if defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS) || defined(CONFIG_KDS)
 static void add_kds_resource(struct kds_resource *kds_res, struct kds_resource ** kds_resources, u32 *kds_res_count,
                              unsigned long * kds_access_bitmap, mali_bool exclusive)
 {
@@ -326,6 +330,7 @@ static void add_kds_resource(struct kds_resource *kds_res, struct kds_resource *
                osk_bitarray_set_bit(*kds_res_count, kds_access_bitmap);
        (*kds_res_count)++;
 }
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS) || defined(CONFIG_KDS) */
 
 static mali_error kbase_jd_pre_external_resources(kbase_jd_atom * katom, const base_jd_atom_v2 *user_atom)
 {
@@ -478,7 +483,6 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom * katom, const b
        {
                /* We have resources to wait for with kds */
                katom->kds_dep_satisfied = MALI_FALSE;
-
                if (kds_async_waitall(&katom->kds_rset, KDS_FLAG_LOCKED_IGNORE, &katom->kctx->jctx.kds_cb,
                                      katom, NULL, kds_res_count, kds_access_bitmap, kds_resources))
                {
@@ -1267,6 +1271,7 @@ static enum hrtimer_restart  zap_timeout_callback( struct hrtimer * timer )
 
        if (kbase_prepare_to_reset_gpu(kbdev))
        {
+               OSK_PRINT_WARN(OSK_BASE_JD, "NOTE: GPU will now be reset as a workaround for a hardware issue");
                kbase_reset_gpu(kbdev);
        }
 
@@ -1280,6 +1285,9 @@ out:
 
 void kbase_jd_zap_context(kbase_context *kctx)
 {
+#ifdef CONFIG_KDS
+       kbase_jd_atom *katom = NULL;
+#endif
        kbase_device *kbdev;
        zap_reset_data reset_data;
        unsigned long flags;
@@ -1291,24 +1299,34 @@ void kbase_jd_zap_context(kbase_context *kctx)
        kbase_job_zap_context(kctx);
 
        mutex_lock(&kctx->jctx.lock);
+
        while(!OSK_DLIST_IS_EMPTY(&kctx->waiting_soft_jobs))
        {
                kbase_jd_atom *katom = OSK_DLIST_POP_FRONT(&kctx->waiting_soft_jobs, kbase_jd_atom, dep_item[0]);
 
                kbase_cancel_soft_job(katom);
        }
+
 #ifdef CONFIG_KDS
+
+       /* For each job waiting on a kds resource, cancel the wait and force the job to
+        * complete early, this is done so that we don't leave jobs outstanding waiting
+        * on kds resources which may never be released when contexts are zapped, resulting
+        * in a hang.
+        *
+        * Note that we can safely iterate over the list as the kbase_jd_context lock is held,
+        * this prevents items being removed when calling job_done_nolock in kbase_cancel_kds_wait_job.
+        */
+
+       OSK_DLIST_FOREACH( &kctx->waiting_kds_resource,
+                                          kbase_jd_atom,
+                                          kds_wait_item,
+                                          katom )
        {
-               kbase_jd_atom *katom = NULL;
-               OSK_DLIST_FOREACH( &kctx->waiting_kds_resource,
-                                                  kbase_jd_atom,
-                                          kds_wait_item,
-                                          katom )
-               {
-                       kbase_cancel_kds_wait_job(katom);
-               }
+               kbase_cancel_kds_wait_job(katom);
        }
 #endif
+
        mutex_unlock(&kctx->jctx.lock);
 
        hrtimer_init_on_stack(&reset_data.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL );
@@ -1411,7 +1429,9 @@ mali_error kbase_jd_init(kbase_context *kctx)
 
        return MALI_ERROR_NONE;
 
+#ifdef CONFIG_KDS
 out2:
+#endif /* CONFIG_KDS */
        destroy_workqueue(kctx->jctx.job_done_wq);
 out1:
        return mali_err;
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.c
index d0aa32d8090a0d6db8d10fe7b733b57fb7629b2f..e3ca504f27f69a7dad189c1193edb564e899b194 100644
@@ -147,7 +147,7 @@ void kbase_job_done_slot(kbase_device *kbdev, int s, u32 completion_code, u64 jo
        /* Only update the event code for jobs that weren't cancelled */
        if ( katom->event_code != BASE_JD_EVENT_JOB_CANCELLED )
        {
-               katom->event_code = completion_code;
+               katom->event_code = (base_jd_event_code)completion_code;
        }
        kbase_device_trace_register_access(kctx, REG_WRITE , JOB_CONTROL_REG(JOB_IRQ_CLEAR), 1 << s);
 
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.c
index 7a8c67a9ac97eff1a8ff5fe77941e4d95d5c141e..72a94419ca1951abb0a964a072d2b766a19c9903 100644
 #include <kbase/src/common/mali_kbase_gator.h>
 
 #include <asm/atomic.h>
+#include <linux/highmem.h>
+#include <linux/mempool.h>
+#include <linux/mm.h>
+
+struct kbase_page_metadata
+{
+       struct list_head list;
+       struct page * page;
+};
+
+STATIC int kbase_mem_allocator_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+       kbase_mem_allocator * allocator;
+       int i;
+       int freed;
+
+       allocator = container_of(s, kbase_mem_allocator, free_list_reclaimer);
+
+       if (sc->nr_to_scan == 0)
+               return atomic_read(&allocator->free_list_size);
+
+       might_sleep();
+
+       mutex_lock(&allocator->free_list_lock);
+
+       i = MIN(atomic_read(&allocator->free_list_size), sc->nr_to_scan);
+       freed = i;
+
+       atomic_sub(i, &allocator->free_list_size);
+
+       while (i--)
+       {
+               struct kbase_page_metadata * md;
+               struct page * p;
+               BUG_ON(list_empty(&allocator->free_list_head));
+               md = list_first_entry(&allocator->free_list_head, struct kbase_page_metadata, list);
+               list_del(&md->list);
+               p = md->page;
+               if (likely(PageHighMem(p)))
+               {
+                       mempool_free(md, allocator->free_list_highmem_pool);
+               }
+               __free_page(p);
+       }
+
+       mutex_unlock(&allocator->free_list_lock);
+
+       return atomic_read(&allocator->free_list_size);
+}
+
+mali_error kbase_mem_allocator_init(kbase_mem_allocator * const allocator, unsigned int max_size)
+{
+       OSK_ASSERT(NULL != allocator);
+
+       allocator->free_list_highmem_slab = KMEM_CACHE(kbase_page_metadata, SLAB_HWCACHE_ALIGN);
+       if (!allocator->free_list_highmem_slab)
+       {
+               return MALI_ERROR_OUT_OF_MEMORY;
+       }
+       allocator->free_list_highmem_pool = mempool_create_slab_pool(0, allocator->free_list_highmem_slab);
+       if (!allocator->free_list_highmem_pool)
+       {
+               kmem_cache_destroy(allocator->free_list_highmem_slab);
+               return MALI_ERROR_OUT_OF_MEMORY;
+       }
+
+       INIT_LIST_HEAD(&allocator->free_list_head);
+       mutex_init(&allocator->free_list_lock);
+       atomic_set(&allocator->free_list_size, 0);
+
+       allocator->free_list_max_size = max_size;
+
+       allocator->free_list_reclaimer.shrink = kbase_mem_allocator_shrink;
+       allocator->free_list_reclaimer.seeks = DEFAULT_SEEKS;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) /* Kernel versions prior to 3.1 : struct shrinker does not define batch */
+       allocator->free_list_reclaimer.batch = 0;
+#endif
+       register_shrinker(&allocator->free_list_reclaimer);
+
+       return MALI_ERROR_NONE;
+}
+
+void kbase_mem_allocator_term(kbase_mem_allocator *allocator)
+{
+       OSK_ASSERT(NULL != allocator);
+
+       unregister_shrinker(&allocator->free_list_reclaimer);
+
+       while (!list_empty(&allocator->free_list_head))
+       {
+               struct kbase_page_metadata * md;
+               struct page * p;
+               md = list_first_entry(&allocator->free_list_head, struct kbase_page_metadata, list);
+               list_del(&md->list);
+               p = md->page;
+               if (likely(PageHighMem(p)))
+               {
+                       mempool_free(md, allocator->free_list_highmem_pool);
+               }
+               __free_page(p);
+       }
+
+       mempool_destroy(allocator->free_list_highmem_pool);
+       kmem_cache_destroy(allocator->free_list_highmem_slab);
+       mutex_destroy(&allocator->free_list_lock);
+}
+
+mali_error kbase_mem_allocator_alloc(kbase_mem_allocator *allocator, u32 nr_pages, osk_phy_addr *pages, int flags)
+{
+       struct kbase_page_metadata * md;
+       struct kbase_page_metadata * tmp;
+       struct page * p;
+       void * mp;
+       int i;
+       int num_from_free_list;
+       struct list_head from_free_list = LIST_HEAD_INIT(from_free_list);
+
+       might_sleep();
+
+       OSK_ASSERT(NULL != allocator);
+
+       /* take from the free list first */
+       mutex_lock(&allocator->free_list_lock);
+
+       num_from_free_list = MIN(nr_pages, atomic_read(&allocator->free_list_size));
+       atomic_sub(num_from_free_list, &allocator->free_list_size);
+
+       for (i = 0; i < num_from_free_list; i++)
+       {
+               BUG_ON(list_empty(&allocator->free_list_head));
+               md = list_first_entry(&allocator->free_list_head, struct kbase_page_metadata, list);
+               list_move(&md->list, &from_free_list);
+       }
+       mutex_unlock(&allocator->free_list_lock);
+
+       i = 0;
+       list_for_each_entry_safe(md, tmp, &from_free_list, list)
+       {
+               list_del(&md->list);
+               p = md->page;
+               if (likely(PageHighMem(p)))
+               {
+                       mempool_free(md, allocator->free_list_highmem_pool);
+               }
+               else if (!(flags & KBASE_REG_MUST_ZERO))
+               {
+                       flush_dcache_page(p);
+               }
+
+               if (flags & KBASE_REG_MUST_ZERO)
+               {
+                       mp = kmap(p);
+                       if (NULL == mp)
+                       {
+                               /* free the current page */
+                               __free_page(p);
+                               /* put the rest back on the free list */
+                               mutex_lock(&allocator->free_list_lock);
+                               list_splice(&from_free_list, &allocator->free_list_head);
+                               atomic_add(num_from_free_list - i - 1, &allocator->free_list_size);
+                               mutex_unlock(&allocator->free_list_lock);
+                               /* drop down to the normal Linux alloc */
+                               break;
+                       }
+                       memset(mp, 0x00, PAGE_SIZE);
+                       osk_sync_to_memory(PFN_PHYS(page_to_pfn(p)), mp, PAGE_SIZE);
+                       kunmap(p);
+               }
+
+               pages[i] = PFN_PHYS(page_to_pfn(p));
+               i++;
+       }
+
+       if (i == nr_pages)
+               return MALI_ERROR_NONE;
+
+       for (; i < nr_pages; i++)
+       {
+               p = alloc_page(GFP_HIGHUSER);
+               if (NULL == p)
+               {
+                       goto err_out_roll_back;
+               }
+
+               mp = kmap(p);
+               if (NULL == mp)
+               {
+                       __free_page(p);
+                       goto err_out_roll_back;
+               }
+               memset(mp, 0x00, PAGE_SIZE); /* instead of __GFP_ZERO, so we can do cache maintenance */
+               osk_sync_to_memory(PFN_PHYS(page_to_pfn(p)), mp, PAGE_SIZE);
+               kunmap(p);
+               pages[i] = PFN_PHYS(page_to_pfn(p));
+       }
+
+       return MALI_ERROR_NONE;
+
+err_out_roll_back:
+       while (i--)
+       {
+               struct page * p;
+               p = pfn_to_page(PFN_DOWN(pages[i]));
+               pages[i] = (osk_phy_addr)0;
+               __free_page(p);
+       }
+       return MALI_ERROR_OUT_OF_MEMORY;
+}
+
+void kbase_mem_allocator_free(kbase_mem_allocator *allocator, u32 nr_pages, osk_phy_addr *pages)
+{
+       int i = 0;
+       int page_count = 0;
+       int tofree;
+       LIST_HEAD(new_free_list_items);
+
+       OSK_ASSERT(NULL != allocator);
+
+       might_sleep();
+
+       /* Starting by just freeing the overspill.
+        * As we do this outside of the lock we might spill too many pages
+        * or get too many on the free list, but the max_size is just a ballpark so it is ok
+        */
+       tofree = atomic_read(&allocator->free_list_size) + nr_pages - allocator->free_list_max_size;
+       /* if tofree became negative this first for loop will be ignored */
+       for (; i < tofree; i++)
+       {
+               if (likely(0 != pages[i]))
+               {
+                       struct page * p;
+                       p = pfn_to_page(PFN_DOWN(pages[i]));
+                       pages[i] = (osk_phy_addr)0;
+                       __free_page(p);
+               }
+       }
+
+       for (; i < nr_pages; i++)
+       {
+               if (likely(0 != pages[i]))
+               {
+                       struct kbase_page_metadata * md;
+                       struct page * p;
+
+                       p = pfn_to_page(PFN_DOWN(pages[i]));
+                       pages[i] = (osk_phy_addr)0;
+
+                       if (likely(PageHighMem(p)))
+                       {
+                               md = mempool_alloc(allocator->free_list_highmem_pool, GFP_KERNEL);
+                               if (!md)
+                               {
+                                       /* can't put it on the free list, direct release */
+                                       __free_page(p);
+                                       continue;
+                               }
+                       }
+                       else
+                       {
+                               md = lowmem_page_address(p);
+                               BUG_ON(!md);
+                       }
+
+                       INIT_LIST_HEAD(&md->list);
+                       md->page = p;
+                       list_add(&md->list, &new_free_list_items);
+                       page_count++;
+               }
+       }
+
+       mutex_lock(&allocator->free_list_lock);
+       list_splice(&new_free_list_items, &allocator->free_list_head);
+       atomic_add(page_count, &allocator->free_list_size);
+       mutex_unlock(&allocator->free_list_lock);
+}
 
 /**
  * @brief Check the zone compatibility of two regions.
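
kbase_mem_allocator_shrink above follows the single-callback shrinker contract of kernels in this range: called with sc->nr_to_scan == 0 it only reports how many pages are reclaimable, otherwise it frees up to nr_to_scan pages off the free list and returns what remains. A minimal sketch of that contract; example_count and example_release are hypothetical helpers, not part of the patch:

    #include <linux/shrinker.h>

    static int  example_count(struct shrinker *s);                       /* hypothetical */
    static void example_release(struct shrinker *s, unsigned long nr);   /* hypothetical */

    static int example_shrink(struct shrinker *s, struct shrink_control *sc)
    {
            if (sc->nr_to_scan == 0)
                    return example_count(s);         /* query phase: just report */

            example_release(s, sc->nr_to_scan);      /* scan phase: reclaim ... */
            return example_count(s);                 /* ... then report what is left */
    }
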
@@ -591,27 +866,6 @@ mali_error kbase_region_tracker_init(kbase_context *kctx)
        return MALI_ERROR_NONE;
 }
 
-typedef struct kbasep_memory_region_performance
-{
-       kbase_memory_performance cpu_performance;
-       kbase_memory_performance gpu_performance;
-} kbasep_memory_region_performance;
-
-static mali_bool kbasep_allocator_order_list_create( osk_phy_allocator * allocators,
-               kbasep_memory_region_performance *region_performance,
-               int memory_region_count, osk_phy_allocator ***sorted_allocs, int allocator_order_count);
-
-/*
- * An iterator which uses one of the orders listed in kbase_phys_allocator_order enum to iterate over allocators array.
- */
-typedef struct kbase_phys_allocator_iterator
-{
-       unsigned int cur_idx;
-       kbase_phys_allocator_array * array;
-       kbase_phys_allocator_order order;
-} kbase_phys_allocator_iterator;
-
-
 mali_error kbase_mem_init(kbase_device * kbdev)
 {
        CSTD_UNUSED(kbdev);
@@ -632,96 +886,15 @@ void kbase_mem_halt(kbase_device * kbdev)
 
 void kbase_mem_term(kbase_device * kbdev)
 {
-       u32 i;
        kbasep_mem_device * memdev;
        OSK_ASSERT(kbdev);
 
        memdev = &kbdev->memdev;
 
-       for (i = 0; i < memdev->allocators.count; i++)
-       {
-               osk_phy_allocator_term(&memdev->allocators.allocs[i]);
-       }
-       kfree(memdev->allocators.allocs);
-       kfree(memdev->allocators.sorted_allocs[0]);
-
        kbase_mem_usage_term(&memdev->usage);
 }
 KBASE_EXPORT_TEST_API(kbase_mem_term)
 
-static mali_error kbase_phys_it_init(kbase_device * kbdev, kbase_phys_allocator_iterator * it, kbase_phys_allocator_order order)
-{
-       OSK_ASSERT(kbdev);
-       OSK_ASSERT(it);
-
-       if (OSK_SIMULATE_FAILURE(OSK_BASE_MEM))
-       {
-               return MALI_ERROR_OUT_OF_MEMORY;
-       }
-
-       if (!kbdev->memdev.allocators.count)
-       {
-               return MALI_ERROR_OUT_OF_MEMORY;
-       }
-       
-       it->cur_idx = 0;
-       it->array = &kbdev->memdev.allocators;
-       it->order = order;
-
-#ifdef CONFIG_MALI_DEBUG
-       it->array->it_bound = MALI_TRUE;
-#endif /* CONFIG_MALI_DEBUG */
-
-       return MALI_ERROR_NONE;
-}
-
-static void kbase_phys_it_term(kbase_phys_allocator_iterator * it)
-{
-       OSK_ASSERT(it);
-       it->cur_idx = 0;
-#ifdef CONFIG_MALI_DEBUG
-       it->array->it_bound = MALI_FALSE;
-#endif /* CONFIG_MALI_DEBUG */
-       it->array = NULL;
-       return;
-}
-
-static osk_phy_allocator * kbase_phys_it_deref(kbase_phys_allocator_iterator * it)
-{
-       OSK_ASSERT(it);
-       OSK_ASSERT(it->array);
-
-       if (it->cur_idx < it->array->count)
-       {
-               return it->array->sorted_allocs[it->order][it->cur_idx];
-       }
-       else
-       {
-               return NULL;
-       }
-}
-
-static osk_phy_allocator * kbase_phys_it_deref_and_advance(kbase_phys_allocator_iterator * it)
-{
-       osk_phy_allocator * alloc;
-
-       OSK_ASSERT(it);
-       OSK_ASSERT(it->array);
-
-       alloc = kbase_phys_it_deref(it);
-       if (alloc)
-       {
-               it->cur_idx++;
-       }
-       return alloc;
-}
-
-/*
- * Page free helper.
- * Handles that commit objects tracks the pages we free
- */
-static void kbase_free_phy_pages_helper(kbase_va_region * reg, u32 nr_pages);
-
 mali_error kbase_mem_usage_init(kbasep_mem_usage * usage, u32 max_pages)
 {
        OSK_ASSERT(usage);
@@ -908,9 +1081,8 @@ struct kbase_va_region *kbase_alloc_free_region(kbase_context *kctx, u64 start_p
 
        new_reg->start_pfn = start_pfn;
        new_reg->nr_pages = nr_pages;
+       new_reg->nr_alloc_pages = 0;
        OSK_DLIST_INIT(&new_reg->map_list);
-       new_reg->root_commit.allocator = NULL;
-       new_reg->last_commit = &new_reg->root_commit;
 
        return new_reg;
 }
@@ -1482,157 +1654,21 @@ void kbase_update_region_flags(struct kbase_va_region *reg, u32 flags, mali_bool
        {
                reg->flags |= KBASE_REG_SHARE_BOTH;
        }
-}
-
-static void kbase_free_phy_pages_helper(kbase_va_region * reg, u32 nr_pages_to_free)
-{
-       osk_phy_addr *page_array;
-
-       u32 nr_pages;
-
-       OSK_ASSERT(reg);
-       OSK_ASSERT(reg->kctx);
-
-       /* Can't call this on TB buffers */
-       OSK_ASSERT(0 == (reg->flags & KBASE_REG_IS_TB));
-       /* can't be called on imported types */
-       OSK_ASSERT(BASE_TMEM_IMPORT_TYPE_INVALID == reg->imported_type);
-       /* Free of too many pages attempted! */
-       OSK_ASSERT(reg->nr_alloc_pages >= nr_pages_to_free);
-       /* A complete free is required if not marked as growable */
-       OSK_ASSERT((reg->flags & KBASE_REG_GROWABLE) || (reg->nr_alloc_pages == nr_pages_to_free));
-
-       if (0 == nr_pages_to_free)
-       {
-               /* early out if nothing to free */
-               return;
-       }
 
-       nr_pages = nr_pages_to_free;
-       
-       page_array = kbase_get_phy_pages(reg);
-
-       OSK_ASSERT(nr_pages_to_free == 0 || page_array != NULL);
-
-       while (nr_pages)
+       if (!(flags & BASE_MEM_DONT_ZERO_INIT))
        {
-               kbase_mem_commit * commit;
-               commit = reg->last_commit;
-
-               if (nr_pages >= commit->nr_pages)
-               {
-                       /* free the whole commit */
-                       kbase_phy_pages_free(reg->kctx->kbdev, commit->allocator, commit->nr_pages,
-                                       page_array + reg->nr_alloc_pages - commit->nr_pages);
-                       
-                       /* update page counts */
-                       nr_pages -= commit->nr_pages;
-                       reg->nr_alloc_pages -= commit->nr_pages;
-                       
-                       if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM )
-                       {
-                               kbase_process_page_usage_dec(reg->kctx, commit->nr_pages);
-                       }
-
-                       /* free the node (unless it's the root node) */
-                       if (commit != &reg->root_commit)
-                       {
-                               reg->last_commit = commit->prev;
-                               kfree(commit);
-                       }
-                       else
-                       {
-                               /* mark the root node as having no commit */
-                               commit->nr_pages = 0;
-                               OSK_ASSERT(nr_pages == 0);
-                               OSK_ASSERT(reg->nr_alloc_pages == 0);
-                               break;
-                       }
-               }
-               else
-               {
-                       /* partial free of this commit */
-                       kbase_phy_pages_free(reg->kctx->kbdev, commit->allocator, nr_pages,
-                                       page_array + reg->nr_alloc_pages - nr_pages);
-                       commit->nr_pages -= nr_pages;
-                       reg->nr_alloc_pages -= nr_pages;
-                       if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM )
-                       {
-                               kbase_process_page_usage_dec(reg->kctx, nr_pages);
-                       }
-                       break; /* end the loop */
-               }
+               reg->flags |= KBASE_REG_MUST_ZERO;
        }
-
-       kbase_mem_usage_release_pages(&reg->kctx->usage, nr_pages_to_free);
 }
 KBASE_EXPORT_TEST_API(kbase_update_region_flags)
 
-u32 kbase_phy_pages_alloc(struct kbase_device *kbdev, osk_phy_allocator *allocator, u32 nr_pages,
-               osk_phy_addr *pages)
-{
-       OSK_ASSERT(kbdev != NULL);
-       OSK_ASSERT(allocator != NULL);
-       OSK_ASSERT(pages != NULL);
-
-       if (allocator->type == OSKP_PHY_ALLOCATOR_OS)
-       {
-               u32 pages_allocated;
-
-               /* Claim pages from OS shared quota. Note that shared OS memory may be used by different allocators. That's why
-                * page request is made here and not on per-allocator basis */
-               if (MALI_ERROR_NONE != kbase_mem_usage_request_pages(&kbdev->memdev.usage, nr_pages))
-               {
-                       return 0;
-               }
-
-               pages_allocated = osk_phy_pages_alloc(allocator, nr_pages, pages);
-
-               if (pages_allocated < nr_pages)
-               {
-                       kbase_mem_usage_release_pages(&kbdev->memdev.usage, nr_pages - pages_allocated);
-               }
-               return pages_allocated;
-       }
-       else
-       {
-               /* Dedicated memory is tracked per allocator. Memory limits are checked in osk_phy_pages_alloc function */
-               return osk_phy_pages_alloc(allocator, nr_pages, pages);
-       }
-}
-KBASE_EXPORT_TEST_API(kbase_phy_pages_alloc)
-
-void kbase_phy_pages_free(struct kbase_device *kbdev, osk_phy_allocator *allocator, u32 nr_pages, osk_phy_addr *pages)
-{
-       OSK_ASSERT(kbdev != NULL);
-       OSK_ASSERT(allocator != NULL);
-       OSK_ASSERT(pages != NULL);
-
-       osk_phy_pages_free(allocator, nr_pages, pages);
-
-       if (allocator->type == OSKP_PHY_ALLOCATOR_OS)
-       {
-               /* release pages from OS shared quota */
-               kbase_mem_usage_release_pages(&kbdev->memdev.usage, nr_pages);
-       }
-}
-KBASE_EXPORT_TEST_API(kbase_phy_pages_free)
-
-
 mali_error kbase_alloc_phy_pages_helper(struct kbase_va_region *reg, u32 nr_pages_requested)
 {
-       kbase_phys_allocator_iterator it;
-       osk_phy_addr *page_array;
-       u32 nr_pages_left;
-       u32 num_pages_on_start;
-       u32 pages_committed;
-       kbase_phys_allocator_order order;
-       u32 performance_flags;
+       kbase_context * kctx;
+       osk_phy_addr * page_array;
 
        OSK_ASSERT(reg);
-       OSK_ASSERT(reg->kctx);
-
-       /* Can't call this on TB or UMP buffers */
+       /* Can't call this on TB buffers */
        OSK_ASSERT(0 == (reg->flags & KBASE_REG_IS_TB));
        /* can't be called on imported types */
        OSK_ASSERT(BASE_TMEM_IMPORT_TYPE_INVALID == reg->imported_type);
@@ -1641,148 +1677,71 @@ mali_error kbase_alloc_phy_pages_helper(struct kbase_va_region *reg, u32 nr_page
        /* A complete commit is required if not marked as growable */
        OSK_ASSERT((reg->flags & KBASE_REG_GROWABLE) || (reg->nr_pages == nr_pages_requested));
 
+       /* early out if nothing to do */
        if (0 == nr_pages_requested)
-       {
-               /* early out if nothing to do */
                return MALI_ERROR_NONE;
-       }
 
-       /* track the number pages so we can roll back on alloc fail */
-       num_pages_on_start = reg->nr_alloc_pages;
-       nr_pages_left = nr_pages_requested;
+       kctx = reg->kctx;
+       OSK_ASSERT(kctx);
 
-       page_array = kbase_get_phy_pages(reg);
-       OSK_ASSERT(page_array);
-
-       /* claim the pages from our per-context quota */
-       if (MALI_ERROR_NONE != kbase_mem_usage_request_pages(&reg->kctx->usage, nr_pages_requested))
+       if (MALI_ERROR_NONE != kbase_mem_usage_request_pages(&kctx->usage, nr_pages_requested))
        {
                return MALI_ERROR_OUT_OF_MEMORY;
        }
 
-       /* First try to extend the last commit */
-       if (reg->last_commit->allocator)
-       {
-               pages_committed = kbase_phy_pages_alloc(reg->kctx->kbdev, reg->last_commit->allocator, nr_pages_left,
-                               page_array + reg->nr_alloc_pages);
-               reg->last_commit->nr_pages += pages_committed;
-               reg->nr_alloc_pages += pages_committed;
-               nr_pages_left -= pages_committed;
-
-               if (!nr_pages_left)
-               {
-                       if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM)
-                       {
-                               kbase_process_page_usage_inc(reg->kctx, nr_pages_requested);
-                       }
-                       return MALI_ERROR_NONE;
-               }
-       }
-
-       performance_flags = reg->flags & (KBASE_REG_CPU_CACHED | KBASE_REG_GPU_CACHED);
+       page_array = kbase_get_phy_pages(reg);
 
-       if (performance_flags == 0)
-       {
-               order = ALLOCATOR_ORDER_CONFIG;
-       }
-       else if (performance_flags == KBASE_REG_CPU_CACHED)
-       {
-               order = ALLOCATOR_ORDER_CPU_PERFORMANCE;
-       }
-       else if (performance_flags == KBASE_REG_GPU_CACHED)
-       {
-               order = ALLOCATOR_ORDER_GPU_PERFORMANCE;
-       }
-       else
+       if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(&kctx->osalloc, nr_pages_requested, page_array + reg->nr_alloc_pages, reg->flags))
        {
-               order = ALLOCATOR_ORDER_CPU_GPU_PERFORMANCE;
+               kbase_mem_usage_release_pages(&kctx->usage, nr_pages_requested);
+               return MALI_ERROR_OUT_OF_MEMORY;
        }
 
-       /* If not fully commited (or no prev allocator) we need to ask all the allocators */
+       reg->nr_alloc_pages += nr_pages_requested;
 
-       /* initialize the iterator we use to loop over the memory providers */
-       if (MALI_ERROR_NONE == kbase_phys_it_init(reg->kctx->kbdev, &it, order))
+       if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM)
        {
-               for (;nr_pages_left && kbase_phys_it_deref(&it); kbase_phys_it_deref_and_advance(&it))
-               {
-                       pages_committed = kbase_phy_pages_alloc(reg->kctx->kbdev, kbase_phys_it_deref(&it), nr_pages_left,
-                                       page_array + reg->nr_alloc_pages);
+               kbase_process_page_usage_inc(kctx, nr_pages_requested);
+       }
 
-                       OSK_ASSERT(pages_committed <= nr_pages_left);
+       return MALI_ERROR_NONE;
+}
 
-                       if (pages_committed)
-                       {
-                               /* got some pages, track them */
-                               kbase_mem_commit * commit;
+void kbase_free_phy_pages_helper(struct kbase_va_region * reg, u32 nr_pages_to_free)
+{
+       kbase_context * kctx;
+       osk_phy_addr * page_array;
 
-                               if (reg->last_commit->allocator)
-                               {
-                                       if(OSK_SIMULATE_FAILURE(OSK_OSK))
-                                       {
-                                               commit = NULL;
-                                       }
-                                       else
-                                       {
-                                               commit = (kbase_mem_commit*)kzalloc(sizeof(*commit), GFP_KERNEL);
-                                       }
-
-                                       if (commit == NULL)
-                                       {
-                                               kbase_phy_pages_free(reg->kctx->kbdev, kbase_phys_it_deref(&it), pages_committed,
-                                                               page_array + reg->nr_alloc_pages);
-                                               break;
-                                       }
-                                       commit->prev = reg->last_commit;
-                               }
-                               else
-                               {
-                                       commit = reg->last_commit;
-                               }
+       OSK_ASSERT(reg);
+       /* Can't call this on TB buffers */
+       OSK_ASSERT(0 == (reg->flags & KBASE_REG_IS_TB));
+       /* can't be called on imported types */
+       OSK_ASSERT(BASE_TMEM_IMPORT_TYPE_INVALID == reg->imported_type);
+       /* Free of too many pages attempted! */
+       OSK_ASSERT(reg->nr_alloc_pages >= nr_pages_to_free);
+       /* A complete free is required if not marked as growable */
+       OSK_ASSERT((reg->flags & KBASE_REG_GROWABLE) || (reg->nr_alloc_pages == nr_pages_to_free));
 
-                               commit->allocator = kbase_phys_it_deref(&it);
-                               commit->nr_pages = pages_committed;
+       /* early out if nothing to do */
+       if (0 == nr_pages_to_free)
+               return;
 
-                               reg->last_commit = commit;
-                               reg->nr_alloc_pages += pages_committed;
+       kctx = reg->kctx;
+       OSK_ASSERT(kctx);
 
-                               nr_pages_left -= pages_committed;
-                       }
-               }
+       page_array = kbase_get_phy_pages(reg);
 
-               /* no need for the iterator any more */
-               kbase_phys_it_term(&it);
+       kbase_mem_allocator_free(&kctx->osalloc, nr_pages_to_free, page_array + reg->nr_alloc_pages - nr_pages_to_free);
 
-               if (nr_pages_left == 0)
-               {
-                       if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM)
-                       {
-                               kbase_process_page_usage_inc(reg->kctx, nr_pages_requested);
-                       }
-                       return MALI_ERROR_NONE;
-               }
-       }
+       reg->nr_alloc_pages -= nr_pages_to_free;
 
-       /* failed to allocate enough memory, roll back */
-       if (reg->nr_alloc_pages != num_pages_on_start)
-       {
-               /*we need the auxiliary var below since kbase_free_phy_pages_helper updates reg->nr_alloc_pages*/
-               u32 track_nr_alloc_pages = reg->nr_alloc_pages;
-               /* we must temporarily inflate the usage tracking as kbase_free_phy_pages_helper decrements it */
-               kbase_process_page_usage_inc(reg->kctx, reg->nr_alloc_pages - num_pages_on_start);
-               /* kbase_free_phy_pages_helper implicitly calls kbase_mem_usage_release_pages */
-               kbase_free_phy_pages_helper(reg, reg->nr_alloc_pages - num_pages_on_start);
-               /* Release the remaining pages */
-               kbase_mem_usage_release_pages(&reg->kctx->usage,
-                                             nr_pages_requested - (track_nr_alloc_pages - num_pages_on_start));
-       }
-       else
+       if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM )
        {
-               kbase_mem_usage_release_pages(&reg->kctx->usage, nr_pages_requested);
+               kbase_process_page_usage_dec(reg->kctx, nr_pages_to_free);
        }
-       return MALI_ERROR_OUT_OF_MEMORY;
+       kbase_mem_usage_release_pages(&reg->kctx->usage, nr_pages_to_free);
 }
 
-
 /* Frees all allocated pages of a region */
 void kbase_free_phy_pages(struct kbase_va_region *reg)
 {
@@ -1830,7 +1789,7 @@ void kbase_free_phy_pages(struct kbase_va_region *reg)
                        kbase_device_trace_buffer_uninstall(reg->kctx);
                        vfree(tb);
                }
-               else if (reg->flags & KBASE_REG_IS_RB)
+               else if (reg->flags & (KBASE_REG_IS_RB | KBASE_REG_IS_MMU_DUMP))
                {
                        /* nothing to do */
                }
@@ -1882,6 +1841,7 @@ int kbase_alloc_phy_pages(struct kbase_va_region *reg, u32 vsize, u32 size)
        }
 
        kbase_set_phy_pages(reg, page_array);
+       reg->nr_alloc_pages = 0;
 
        if (MALI_ERROR_NONE != kbase_alloc_phy_pages_helper(reg, size))
        {
@@ -2269,6 +2229,7 @@ mali_error kbase_tmem_set_size(kbase_context *kctx, mali_addr64 gpu_addr, u32 si
                mali_error err;
                delta = size-reg->nr_alloc_pages;
                /* Allocate some more pages */
+
                if (MALI_ERROR_NONE != kbase_alloc_phy_pages_helper(reg, delta))
                {
                        *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
@@ -2603,233 +2564,3 @@ void kbase_gpu_vm_unlock(kbase_context *kctx)
 }
 KBASE_EXPORT_TEST_API(kbase_gpu_vm_unlock)
 
-/* will be called during init time only */
-mali_error kbase_register_memory_regions(kbase_device * kbdev, const kbase_attribute *attributes)
-{
-       int total_regions;
-       int dedicated_regions;
-       int allocators_initialized;
-       osk_phy_allocator * allocs;
-       kbase_memory_performance shared_memory_performance;
-       kbasep_memory_region_performance *region_performance;
-       kbase_memory_resource *resource;
-       const kbase_attribute *current_attribute;
-       u32 max_shared_memory;
-       kbasep_mem_device * memdev;
-
-       OSK_ASSERT(kbdev);
-       OSK_ASSERT(attributes);
-
-       memdev = &kbdev->memdev;
-
-       /* Programming error to register memory after we've started using the iterator interface */
-#ifdef CONFIG_MALI_DEBUG
-       OSK_ASSERT(memdev->allocators.it_bound == MALI_FALSE);
-#endif /* CONFIG_MALI_DEBUG */
-
-       max_shared_memory = (u32) kbasep_get_config_value(kbdev, attributes, KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_MAX);
-       shared_memory_performance =
-                       (kbase_memory_performance)kbasep_get_config_value(kbdev, attributes, KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_PERF_GPU);
-       /* count dedicated_memory_regions */
-       dedicated_regions = kbasep_get_config_attribute_count_by_id(attributes, KBASE_CONFIG_ATTR_MEMORY_RESOURCE);
-
-       total_regions = dedicated_regions;
-       if (max_shared_memory > 0)
-       {
-               total_regions++;
-       }
-
-       if (total_regions == 0)
-       {
-               OSK_PRINT_ERROR(OSK_BASE_MEM,  "No memory regions specified");
-               return MALI_ERROR_FUNCTION_FAILED;
-       }
-
-       if(OSK_SIMULATE_FAILURE(OSK_OSK))
-       {
-               region_performance = NULL;
-       }
-       else
-       {
-               OSK_ASSERT(0 != total_regions);
-               region_performance = kmalloc(sizeof(kbasep_memory_region_performance) * total_regions, GFP_KERNEL);
-       }
-
-       if (region_performance == NULL)
-       {
-               goto out;
-       }
-
-       if(OSK_SIMULATE_FAILURE(OSK_OSK))
-       {
-               allocs = NULL;
-       }
-       else
-       {
-               OSK_ASSERT(0 != total_regions);
-               allocs = kmalloc(sizeof(osk_phy_allocator) * total_regions, GFP_KERNEL);
-       }
-
-       if (allocs == NULL)
-       {
-               goto out_perf;
-       }
-
-       current_attribute = attributes;
-       allocators_initialized = 0;
-       while (current_attribute != NULL)
-       {
-               current_attribute = kbasep_get_next_attribute(current_attribute, KBASE_CONFIG_ATTR_MEMORY_RESOURCE);
-
-               if (current_attribute != NULL)
-               {
-                       resource = (kbase_memory_resource *)current_attribute->data;
-                       if (OSK_ERR_NONE != osk_phy_allocator_init(&allocs[allocators_initialized], resource->base,
-                               (u32)(resource->size >> PAGE_SHIFT), resource->name))
-                       {
-                               goto out_allocator_term;
-                       }
-
-                       kbasep_get_memory_performance(resource, &region_performance[allocators_initialized].cpu_performance,
-                               &region_performance[allocators_initialized].gpu_performance);
-                       current_attribute++;
-                       allocators_initialized++;
-               }
-       }
-
-       /* register shared memory region */
-       if (max_shared_memory > 0)
-       {
-               if (OSK_ERR_NONE != osk_phy_allocator_init(&allocs[allocators_initialized], 0,
-                               max_shared_memory >> PAGE_SHIFT, NULL))
-               {
-                       goto out_allocator_term;
-               }
-
-               region_performance[allocators_initialized].cpu_performance = KBASE_MEM_PERF_NORMAL;
-               region_performance[allocators_initialized].gpu_performance = shared_memory_performance;
-               allocators_initialized++;
-       }
-
-       if (MALI_ERROR_NONE != kbase_mem_usage_init(&memdev->usage, max_shared_memory >> PAGE_SHIFT))
-       {
-               goto out_allocator_term;
-       }
-
-       if (MALI_ERROR_NONE != kbasep_allocator_order_list_create(allocs, region_performance, total_regions, memdev->allocators.sorted_allocs,
-                       ALLOCATOR_ORDER_COUNT))
-       {
-               goto out_memctx_term;
-       }
-
-       memdev->allocators.allocs = allocs;
-       memdev->allocators.count = total_regions;
-
-       kfree(region_performance);
-
-       return MALI_ERROR_NONE;
-
-out_memctx_term:
-       kbase_mem_usage_term(&memdev->usage);
-out_allocator_term:
-       while (allocators_initialized-- > 0)
-       {
-               osk_phy_allocator_term(&allocs[allocators_initialized]);
-       }
-       kfree(allocs);
-out_perf:
-       kfree(region_performance);
-out:
-       return MALI_ERROR_OUT_OF_MEMORY;
-}
-KBASE_EXPORT_TEST_API(kbase_register_memory_regions)
-
-static mali_error kbasep_allocator_order_list_create( osk_phy_allocator * allocators,
-               kbasep_memory_region_performance *region_performance, int memory_region_count,
-               osk_phy_allocator ***sorted_allocs, int allocator_order_count)
-{
-       int performance;
-       int regions_sorted;
-       int i;
-       void *sorted_alloc_mem_block;
-
-       if(OSK_SIMULATE_FAILURE(OSK_OSK))
-       {
-               sorted_alloc_mem_block = NULL;
-       }
-       else
-       {
-               OSK_ASSERT(0 != memory_region_count);
-               OSK_ASSERT(0 != allocator_order_count);
-               sorted_alloc_mem_block = kmalloc(sizeof(osk_phy_allocator **) * memory_region_count * allocator_order_count, GFP_KERNEL);
-       }
-
-       if (sorted_alloc_mem_block == NULL)
-       {
-               goto out;
-       }
-
-       /* each allocator list points to memory in recently allocated block */
-       for (i = 0; i < ALLOCATOR_ORDER_COUNT; i++)
-       {
-               sorted_allocs[i] = (osk_phy_allocator **)sorted_alloc_mem_block + memory_region_count*i;
-       }
-
-       /* use the same order as in config file */
-       for (i = 0; i < memory_region_count; i++)
-       {
-               sorted_allocs[ALLOCATOR_ORDER_CONFIG][i] = &allocators[i];
-       }
-
-       /* Sort allocators by GPU performance */
-       performance = KBASE_MEM_PERF_FAST;
-       regions_sorted = 0;
-       while (performance >= KBASE_MEM_PERF_SLOW)
-       {
-               for (i = 0; i < memory_region_count; i++)
-               {
-                       if (region_performance[i].gpu_performance == (kbase_memory_performance)performance)
-                       {
-                               sorted_allocs[ALLOCATOR_ORDER_GPU_PERFORMANCE][regions_sorted] = &allocators[i];
-                               regions_sorted++;
-                       }
-               }
-               performance--;
-       }
-
-       /* Sort allocators by CPU performance */
-       performance = KBASE_MEM_PERF_FAST;
-       regions_sorted = 0;
-       while (performance >= KBASE_MEM_PERF_SLOW)
-       {
-               for (i = 0; i < memory_region_count; i++)
-               {
-                       if ((int)region_performance[i].cpu_performance == performance)
-                       {
-                               sorted_allocs[ALLOCATOR_ORDER_CPU_PERFORMANCE][regions_sorted] = &allocators[i];
-                               regions_sorted++;
-                       }
-               }
-               performance--;
-       }
-
-       /* Sort allocators by CPU and GPU performance (equally important) */
-       performance = 2 * KBASE_MEM_PERF_FAST;
-       regions_sorted = 0;
-       while (performance >= 2*KBASE_MEM_PERF_SLOW)
-       {
-               for (i = 0; i < memory_region_count; i++)
-               {
-                       if ((int)(region_performance[i].cpu_performance + region_performance[i].gpu_performance) == performance)
-                       {
-                               sorted_allocs[ALLOCATOR_ORDER_CPU_GPU_PERFORMANCE][regions_sorted] = &allocators[i];
-                               regions_sorted++;
-                       }
-               }
-               performance--;
-       }
-       return MALI_ERROR_NONE;
-out:
-       return MALI_ERROR_OUT_OF_MEMORY;
-}
-
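
Editor's note: compared with the removed iterator-based path, the new kbase_alloc_phy_pages_helper boils down to a claim-quota, allocate, roll-back-on-failure sequence. A hedged sketch of that shape follows; the demo_* types and helpers are hypothetical stand-ins, not driver symbols:

/* Illustrative only; demo_* names are hypothetical. */
struct demo_region {
	struct demo_usage     *usage;		/* per-context quota */
	struct demo_allocator *alloc;		/* backing page allocator */
	osk_phy_addr          *pages;		/* physical page array */
	u32                    nr_alloc_pages;
};

static mali_error demo_commit_pages(struct demo_region *reg, u32 nr_pages)
{
	/* 1. reserve against the per-context quota first */
	if (MALI_ERROR_NONE != demo_usage_request(reg->usage, nr_pages))
		return MALI_ERROR_OUT_OF_MEMORY;

	/* 2. back the reservation with physical pages */
	if (MALI_ERROR_NONE != demo_alloc(reg->alloc, nr_pages,
					  reg->pages + reg->nr_alloc_pages)) {
		/* 3. undo the reservation if the allocation fails */
		demo_usage_release(reg->usage, nr_pages);
		return MALI_ERROR_OUT_OF_MEMORY;
	}

	reg->nr_alloc_pages += nr_pages;
	return MALI_ERROR_NONE;
}
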
index 44446fb0947ea5abede36deced667302826557a7..471c6852adf48b59fd0e4176f6e96f34f3557f24 100644 (file)
@@ -49,6 +49,7 @@ updates and generates duplicate page faults as the page table information used b
 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
+
 /**
  * A CPU mapping
  */
@@ -61,20 +62,6 @@ typedef struct kbase_cpu_mapping
        void                *private; /* Use for VMA */
 } kbase_cpu_mapping;
 
-/**
- * A physical memory (sub-)commit
- */
-typedef struct kbase_mem_commit
-{
-       osk_phy_allocator *           allocator;
-       u32                           nr_pages;
-       struct kbase_mem_commit *     prev;
-       /*
-        * The offset of the commit is implict by
-        * the prev_commit link position of this node
-        */
-} kbase_mem_commit;
-
 /**
  * A GPU memory region, and attributes for CPU mappings.
  */
@@ -113,7 +100,9 @@ typedef struct kbase_va_region
 #define KBASE_REG_GPU_RD     (1ul<<16) /* GPU read access */
 #define KBASE_REG_CPU_RD     (1ul<<17) /* CPU read access */
 
-#define KBASE_REG_FLAGS_NR_BITS    18  /* Number of bits used by kbase_va_region flags */
+#define KBASE_REG_MUST_ZERO  (1ul<<18) /* Pages must be zeroed before first use */
+
+#define KBASE_REG_FLAGS_NR_BITS    19  /* Number of bits used by kbase_va_region flags */
 
 #define KBASE_REG_ZONE_PMEM  KBASE_REG_ZONE(0)
 
@@ -153,18 +142,6 @@ typedef struct kbase_va_region
        u32                 nr_alloc_pages; /* nr of pages allocated */
        u32                 extent;         /* nr of pages alloc'd on PF */
 
-       /* two variables to track our physical commits: */
-
-       /* We always have a root commit.
-        * Most allocation will only have this one.
-        * */
-       kbase_mem_commit    root_commit;
-
-       /* This one is initialized to point to the root_commit,
-        * but if a new and separate commit is needed it will point
-        * to the last (still valid) commit we've done */
-       kbase_mem_commit *  last_commit;
-
        osk_phy_addr        *phy_pages;
 
        osk_dlist           map_list;
@@ -208,68 +185,54 @@ static INLINE void kbase_set_phy_pages(struct kbase_va_region *reg, osk_phy_addr
        reg->phy_pages = phy_pages;
 }
 
+mali_error kbase_mem_init(kbase_device * kbdev);
+void       kbase_mem_halt(kbase_device * kbdev);
+void       kbase_mem_term(kbase_device * kbdev);
+
 /**
- * @brief Allocate physical memory and track shared OS memory usage.
- *
- * This function is kbase wrapper of osk_phy_pages_alloc. Apart from allocating memory it also tracks shared OS memory
- * usage and fails whenever shared memory limits would be exceeded.
- *
- * @param[in] kbdev     pointer to kbase_device structure for which memory is allocated
- * @param[in] allocator initialized physical allocator
- * @param[in] nr_pages  number of physical pages to allocate
- * @param[out] pages    array of \a nr_pages elements storing the physical
- *                      address of an allocated page
- * @return The number of pages successfully allocated,
- * which might be lower than requested, including zero pages.
- *
- * @see ::osk_phy_pages_alloc
+ * @brief Initialize an OS based memory allocator.
+ *
+ * Initializes an allocator.
+ * Must be called before any allocation is attempted.
+ * \a kbase_mem_allocator_alloc and \a kbase_mem_allocator_free are used
+ * to allocate and free memory.
+ * \a kbase_mem_allocator_term must be called to clean up the allocator.
+ * All memory obtained via \a kbase_mem_allocator_alloc must have been freed
+ * via \a kbase_mem_allocator_free before \a kbase_mem_allocator_term is called.
+ *
+ * @param allocator Allocator object to initialize
+ * @param max_size  Maximum number of pages to keep on the freelist.
+ * @return MALI_ERROR_NONE on success, an error code indicating what failed on error.
  */
-u32 kbase_phy_pages_alloc(struct kbase_device *kbdev, osk_phy_allocator *allocator, u32 nr_pages, osk_phy_addr *pages);
-
+mali_error kbase_mem_allocator_init(kbase_mem_allocator * allocator, unsigned int max_size);
 /**
- * @brief Free physical memory and track shared memory usage
- *
- * This function, like osk_phy_pages_free, frees physical memory but also tracks shared OS memory usage.
+ * @brief Allocate memory via an OS based memory allocator.
  *
- * @param[in] kbdev     pointer to kbase_device for which memory is allocated
- * @param[in] allocator initialized physical allocator
- * @param[in] nr_pages  number of physical pages to allocate
- * @param[out] pages    array of \a nr_pages elements storing the physical
- *                      address of an allocated page
- *
- * @see ::osk_phy_pages_free
+ * @param[in]  allocator Allocator to obtain the memory from
+ * @param      nr_pages  Number of pages to allocate
+ * @param[out] pages     Pointer to an array where the physical address of the allocated pages will be stored
+ * @param      flags     Allocation flag, 0 or KBASE_REG_MUST_ZERO supported.
+ * @return MALI_ERROR_NONE if the pages were allocated, an error code indicating what failed on error
  */
-void kbase_phy_pages_free(struct kbase_device *kbdev, osk_phy_allocator *allocator, u32 nr_pages, osk_phy_addr *pages);
-
+mali_error kbase_mem_allocator_alloc(kbase_mem_allocator * allocator, u32 nr_pages, osk_phy_addr *pages, int flags);
 /**
- * @brief Register shared and dedicated memory regions
- *
- * Function registers shared and dedicated memory regions (registers physical allocator for each region)
- * using given configuration attributes. Additionally, several ordered lists of physical allocators are created with
- * different sort order (based on CPU, GPU, CPU+GPU performance and order in config). If there are many memory regions
- * with the same performance, then order in which they appeared in config is important. Shared OS memory is treated as if
- * it's defined after dedicated memory regions, so unless it matches region's performance flags better, it's chosen last.
+ * @brief Free memory obtained from an OS based memory allocator.
  *
- * @param[in] kbdev       pointer to kbase_device for which regions are registered
- * @param[in] attributes  array of configuration attributes. It must be terminated with KBASE_CONFIG_ATTR_END attribute
- *
- * @return MALI_ERROR_NONE if no error occurred. Error code otherwise
- *
- * @see ::kbase_alloc_phy_pages_helper
+ * @param[in] allocator Allocator to free the memory back to
+ * @param     nr_pages  Number of pages to free
+ * @param[in] pages     Pointer to an array holding the physical addresses of the pages to free.
  */
-mali_error kbase_register_memory_regions(kbase_device * kbdev, const kbase_attribute *attributes);
-
+void kbase_mem_allocator_free(kbase_mem_allocator * allocator, u32 nr_pages, osk_phy_addr *pages);
 /**
- * @brief Frees memory regions registered for the given device.
+ * @brief Terminate an OS based memory allocator.
+ *
+ * Frees all cached allocations and cleans up internal state.
+ * All allocated pages must have been freed via \a kbase_mem_allocator_free
+ * before this function is called.
  *
- * @param[in] kbdev       pointer to kbase device for which memory regions are to be freed
+ * @param[in] allocator Allocator to terminate
  */
-void kbase_free_memory_regions(kbase_device * kbdev);
-
-mali_error kbase_mem_init(kbase_device * kbdev);
-void       kbase_mem_halt(kbase_device * kbdev);
-void       kbase_mem_term(kbase_device * kbdev);
-
+void kbase_mem_allocator_term(kbase_mem_allocator * allocator);
 
 /**
  * @brief Initializes memory context which tracks memory usage.
@@ -396,22 +359,6 @@ void kbase_mmu_disable (kbase_context *kctx);
 
 void kbase_mmu_interrupt(kbase_device * kbdev, u32 irq_stat);
 
-/**
- * @brief Allocates physical pages using registered physical allocators.
- *
- * Function allocates physical pages using registered physical allocators. Allocator list is iterated until all pages
- * are successfully allocated. Function tries to match the most appropriate order of iteration basing on
- * KBASE_REG_CPU_CACHED and KBASE_REG_GPU_CACHED flags of the region.
- *
- * @param[in]   reg       memory region in which physical pages are supposed to be allocated
- * @param[in]   nr_pages  number of physical pages to allocate
- *
- * @return MALI_ERROR_NONE if all pages have been successfully allocated. Error code otherwise
- *
- * @see kbase_register_memory_regions
- */
-mali_error kbase_alloc_phy_pages_helper(kbase_va_region *reg, u32 nr_pages);
-
 /** Dump the MMU tables to a buffer
  *
  * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the
@@ -609,5 +556,27 @@ enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer * timer);
 void kbase_as_poking_timer_retain(kbase_as * as);
 void kbase_as_poking_timer_release(kbase_as * as);
 
+/**
+ * @brief Allocates physical pages.
+ *
+ * Allocates \a nr_pages_requested pages and updates the region object.
+ *
+ * @param[in]   reg                 memory region in which physical pages are to be allocated
+ * @param[in]   nr_pages_requested  number of physical pages to allocate
+ *
+ * @return MALI_ERROR_NONE if all pages have been successfully allocated. Error code otherwise
+ */
+mali_error kbase_alloc_phy_pages_helper(struct kbase_va_region *reg, u32 nr_pages_requested);
+
+/**
+ * @brief Free physical pages.
+ *
+ * Frees \a nr_pages pages and updates the region object.
+ *
+ * @param[in]   reg       memory region from which physical pages are freed
+ * @param[in]   nr_pages  number of physical pages to free
+ */
+void kbase_free_phy_pages_helper(struct kbase_va_region * reg, u32 nr_pages);
+
 
 #endif /* _KBASE_MEM_H_ */
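
Editor's note: the declarations above describe a small allocator API with an init/alloc/free/term lifecycle. The following is a hedged usage sketch only; it assumes a kbase_mem_allocator can live as a local variable, that a 256-page freelist cap is acceptable, and demo_allocator_roundtrip is a hypothetical caller:

mali_error demo_allocator_roundtrip(void)
{
	kbase_mem_allocator allocator;
	osk_phy_addr pages[16];
	mali_error err;

	/* keep at most 256 pages cached on the allocator's freelist */
	err = kbase_mem_allocator_init(&allocator, 256);
	if (MALI_ERROR_NONE != err)
		return err;

	/* request 16 zeroed pages; their physical addresses land in pages[] */
	err = kbase_mem_allocator_alloc(&allocator, 16, pages, KBASE_REG_MUST_ZERO);
	if (MALI_ERROR_NONE == err)
		kbase_mem_allocator_free(&allocator, 16, pages);

	/* every allocated page must be freed before termination */
	kbase_mem_allocator_term(&allocator);
	return err;
}
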
index 0334ff9375211a941e2eed5ceb98208ca26df6c2..c05e1083b6b25a4bc61cfbb040436374cbdf7e81 100644 (file)
@@ -262,15 +262,12 @@ osk_phy_addr kbase_mmu_alloc_pgd(kbase_context *kctx)
        osk_phy_addr pgd;
        u64 *page;
        int i;
-       u32 count;
        OSK_ASSERT( NULL != kctx);
        if (MALI_ERROR_NONE != kbase_mem_usage_request_pages(&kctx->usage, 1))
        {
                return 0;
        }
-
-       count = kbase_phy_pages_alloc(kctx->kbdev, &kctx->pgd_allocator, 1, &pgd);
-       if (count != 1)
+       if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(kctx->pgd_allocator, 1, &pgd, 0))
        {
                kbase_mem_usage_release_pages(&kctx->usage, 1);
                return 0;
@@ -279,7 +276,7 @@ osk_phy_addr kbase_mmu_alloc_pgd(kbase_context *kctx)
        page = osk_kmap(pgd);
        if(NULL == page)
        {
-               kbase_phy_pages_free(kctx->kbdev, &kctx->pgd_allocator, 1, &pgd);
+               kbase_mem_allocator_free(kctx->pgd_allocator, 1, &pgd);
                kbase_mem_usage_release_pages(&kctx->usage, 1);
                return 0;
        }
@@ -758,8 +755,8 @@ static void mmu_teardown_level(kbase_context *kctx, osk_phy_addr pgd, int level,
                        beenthere("pte %lx level %d", (unsigned long)target_pgd, level + 1);
                        if (zap)
                        {
-                               kbase_phy_pages_free(kctx->kbdev, &kctx->pgd_allocator, 1, &target_pgd);
-                               kbase_process_page_usage_dec(kctx, 1 );
+                               kbase_mem_allocator_free(kctx->pgd_allocator, 1, &target_pgd);
+                               kbase_process_page_usage_dec(kctx, 1);
                                kbase_mem_usage_release_pages(&kctx->usage, 1);
                        }
                }
@@ -805,7 +802,7 @@ void kbase_mmu_free_pgd(kbase_context *kctx)
        mmu_teardown_level(kctx, kctx->pgd, MIDGARD_MMU_TOPLEVEL, 1, kctx->mmu_teardown_pages);
 
        beenthere("pgd %lx", (unsigned long)kctx->pgd);
-       kbase_phy_pages_free(kctx->kbdev, &kctx->pgd_allocator, 1, &kctx->pgd);
+       kbase_mem_allocator_free(kctx->pgd_allocator, 1, &kctx->pgd);
        kbase_process_page_usage_dec(kctx, 1 );
        kbase_mem_usage_release_pages(&kctx->usage, 1);
 }
index 9801d4d63b92c58e5da3414212fef1159404dacb..1e2859a1d2fcb490880b7062c26ce2d1dcde19fa 100644 (file)
@@ -82,10 +82,19 @@ static void coarse_demand_state_changed(kbase_device *kbdev)
  */
 static void coarse_demand_power_down(kbase_device *kbdev)
 {
+       u64 cores;
+
        /* Inform the system that the transition has started */
        kbase_pm_power_transitioning(kbdev);
 
-       /* No need to turn off the cores */
+       cores = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
+       kbase_pm_invoke_power_down(kbdev, KBASE_PM_CORE_SHADER, cores);
+
+       cores = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_TILER);
+       kbase_pm_invoke_power_down(kbdev, KBASE_PM_CORE_TILER, cores);
+
+       /* Note we don't call kbase_pm_check_transitions because we don't want to wait
+        * for the above transitions to take place before turning the GPU power domain off */
 
        kbdev->pm.policy_data.coarse_demand.state = KBASEP_PM_COARSE_DEMAND_STATE_POWERING_DOWN;
 
index 52aa5d9c6ed571da8b3e14ad229d09b46612aacb..c7db651e0107f1fccaa841f4849c61f55c978c71 100644 (file)
@@ -146,8 +146,11 @@ KBASE_EXPORT_TEST_API(kbase_pm_invoke_power_up)
 
 void kbase_pm_invoke_power_down(kbase_device *kbdev, kbase_pm_core_type type, u64 cores)
 {
+       unsigned long flags;
        OSK_ASSERT( kbdev != NULL );
 
+       spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+
        switch(type)
        {
                case KBASE_PM_CORE_SHADER:
@@ -158,14 +161,19 @@ void kbase_pm_invoke_power_down(kbase_device *kbdev, kbase_pm_core_type type, u6
                                {
                                        KBASE_TRACE_ADD( kbdev, PM_CORES_CHANGE_DESIRED_ON_POWERDOWN, NULL, NULL, 0u, (u32)kbdev->pm.desired_shader_state );
                                }
+                               /* Also remove the cores from the available set to prevent job submission to
+                                * these cores before the next call to kbase_pm_check_transitions */
+                               kbdev->shader_available_bitmap &= ~cores;
                        }
                        break;
                case KBASE_PM_CORE_TILER:
                        kbdev->pm.desired_tiler_state &= ~cores;
+                       kbdev->tiler_available_bitmap &= ~cores;
                        break;
                default:
                        OSK_ASSERT(0);
        }
+       spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
 }
 KBASE_EXPORT_TEST_API(kbase_pm_invoke_power_down)
 /** Get information about a core set
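
Editor's note: the hunk above clears the desired-state and available bitmaps while holding pm.power_change_lock with interrupts disabled, so no interrupt-context reader can observe one bitmap updated without the other. A minimal sketch of that irq-safe update pattern, with hypothetical demo_* names in place of the driver's state:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_pm {
	spinlock_t power_change_lock;
	u64 desired_state;
	u64 available_bitmap;
};

static void demo_power_down_cores(struct demo_pm *pm, u64 cores)
{
	unsigned long flags;

	/* disable local interrupts while both bitmaps are updated, so an
	 * interrupt handler never sees a half-applied change */
	spin_lock_irqsave(&pm->power_change_lock, flags);
	pm->desired_state    &= ~cores;
	pm->available_bitmap &= ~cores;
	spin_unlock_irqrestore(&pm->power_change_lock, flags);
}
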
index 57382f1d0d113c45b8ea79c3a738e84b29146f0e..8aa554fb394f8aad951ebd79d863c0144ece06dc 100644 (file)
@@ -21,6 +21,9 @@
 
 #include <kbase/src/common/mali_kbase.h>
 #include <kbase/src/common/mali_kbase_pm.h>
+#if defined(CONFIG_MALI_T6XX_DVFS) && defined(CONFIG_MACH_MANTA)
+#include <kbase/src/platform/mali_kbase_dvfs.h>
+#endif
 
 /* When VSync is being hit aim for utilisation between 70-90% */
 #define KBASE_PM_VSYNC_MIN_UTILISATION          70
index d41b2ae8011fc7f038e850938402a735ba79153c..93c773f170d0f61096d06cba5954303b542e9b26 100644 (file)
@@ -326,7 +326,6 @@ mali_error kbase_prepare_soft_job(kbase_jd_atom *katom )
                case BASE_JD_REQ_SOFT_FENCE_WAIT:
                        {
                                base_fence fence;
-                               int fd;
                                if (MALI_ERROR_NONE != ukk_copy_from_user(sizeof(fence), &fence, (__user void*)(uintptr_t)katom->jc))
                                {
                                        return MALI_ERROR_FUNCTION_FAILED;
index ca6793ecb481f5081bb257d9e52996dacb274e1f..0ff232eda3ce4a972b129b7bb914ca78cfb80fb5 100644 (file)
@@ -2409,13 +2409,6 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
        kbdev->gpu_props.irq_throttle_time_us = kbasep_get_config_value(kbdev, platform_data,
                                                                       KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US);
 
-       err = kbase_register_memory_regions(kbdev, (kbase_attribute *)osdev->dev->platform_data);
-       if (err)
-       {
-               dev_err(osdev->dev, "Failed to register memory regions\n");
-               goto out_term_dev;
-       }
-
        err = kbase_common_device_init(kbdev);
        if (err)
        {
index 3b9b96f65a0d90fd3735516b6d9999b207fda91d..283962f1821f5b8aa094823a3513ff4d7ba04c37 100644 (file)
@@ -264,6 +264,7 @@ static int  kbase_trace_buffer_mmap(kbase_context * kctx, struct vm_area_struct
 
        new_reg->flags  &= ~KBASE_REG_FREE;
        new_reg->flags  |= KBASE_REG_IS_TB | KBASE_REG_CPU_CACHED;
+       new_reg->nr_alloc_pages = nr_pages;
 
        if (MALI_ERROR_NONE != kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1))
        {
@@ -324,6 +325,7 @@ static int kbase_mmu_dump_mmap( kbase_context *kctx,
 
        new_reg->flags &= ~KBASE_REG_FREE;
        new_reg->flags |= KBASE_REG_IS_MMU_DUMP | KBASE_REG_CPU_CACHED;
+       new_reg->nr_alloc_pages = nr_pages;
 
        if (MALI_ERROR_NONE != kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1))
        {