xenbits.xensource.com Git - people/aperard/linux-chromebook.git/commitdiff
CHROMIUM: exynos: acquire/release for GEM objects
authorJohn Sheu <sheu@chromium.org>
Wed, 7 Nov 2012 00:11:28 +0000 (16:11 -0800)
committerGerrit <chrome-bot@google.com>
Tue, 11 Dec 2012 03:24:23 +0000 (19:24 -0800)
* Adds acquire/release ioctls for Exynos GEM objects, to be used
  before/after CPU access.  Uses KDS for synchronization.

BUG=chrome-os-partner:11949
TEST=local build, run on snow

Change-Id: Ie4b04530701adf3054f150cfe5143680efceb531
Signed-off-by: John Sheu <sheu@chromium.org>
Reviewed-on: https://gerrit.chromium.org/gerrit/37756

drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
include/drm/exynos_drm.h

index 8a577a11510bb23f6b1510e66a42ed354572db09..300d01d72ca6015d593fc71eed3e10f0d93d2f28 100644 (file)
@@ -168,16 +168,44 @@ static int exynos_drm_unload(struct drm_device *dev)
 
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
+       struct exynos_drm_file_private *file_private;
+
        DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+       file_private = kzalloc(sizeof(*file_private), GFP_KERNEL);
+       if (!file_private) {
+               DRM_ERROR("failed to allocate exynos_drm_file_private\n");
+               return -ENOMEM;
+       }
+       INIT_LIST_HEAD(&file_private->gem_cpu_acquire_list);
+
+       file->driver_priv = file_private;
+
        return exynos_drm_subdrv_open(dev, file);
 }
 
 static void exynos_drm_preclose(struct drm_device *dev,
                                        struct drm_file *file)
 {
+       struct exynos_drm_file_private *file_private = file->driver_priv;
+       struct exynos_drm_gem_obj_node *cur, *d;
+
        DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+       mutex_lock(&dev->struct_mutex);
+       /* release kds resource sets for outstanding GEM object acquires */
+       list_for_each_entry_safe(cur, d,
+                       &file_private->gem_cpu_acquire_list, list) {
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+               BUG_ON(cur->exynos_gem_obj->resource_set == NULL);
+               kds_resource_set_release(&cur->exynos_gem_obj->resource_set);
+#endif
+               drm_gem_object_unreference(&cur->exynos_gem_obj->base);
+               kfree(cur);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       INIT_LIST_HEAD(&file_private->gem_cpu_acquire_list);
+
        exynos_drm_subdrv_close(dev, file);
 }
 
@@ -217,6 +245,12 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
                        DRM_UNLOCKED | DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
                        vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CPU_ACQUIRE,
+                       exynos_drm_gem_cpu_acquire_ioctl,
+                       DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CPU_RELEASE,
+                       exynos_drm_gem_cpu_release_ioctl,
+                       DRM_UNLOCKED | DRM_AUTH),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
index 854e6bc3a1a5c49161792add6e9b0eb533f9b2bf..f4b92d1856cb5431e5491c100d3b6808d574b0b1 100644 (file)
@@ -149,6 +149,15 @@ struct exynos_drm_private {
 #endif
 };
 
+/*
+ * Exynos drm_file private structure.
+ *
+ * @gem_cpu_acquire_list: list of GEM objects we hold acquires on
+ */
+struct exynos_drm_file_private {
+       struct list_head gem_cpu_acquire_list;
+};
+
 /*
  * Exynos drm sub driver structure.
  *
index c7ee56680e65a5187224d287d823ba3319e34d72..9bd46629dfa2f9cfb34ab0da12bcb2c4a0826eb2 100644 (file)
@@ -26,6 +26,8 @@
 #include "drmP.h"
 #include "drm.h"
 
+#include <linux/completion.h>
+#include <linux/kds.h>
 #include <linux/shmem_fs.h>
 #include <drm/exynos_drm.h>
 
@@ -286,6 +288,11 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 
        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
 
+       if (exynos_gem_obj->resource_set != NULL) {
+               /* kds_resource_set_release NULLs the pointer */
+               kds_resource_set_release(&exynos_gem_obj->resource_set);
+       }
+
        if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
                        exynos_gem_obj->buffer->pages)
                exynos_drm_gem_put_pages(obj);
@@ -600,6 +607,177 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+static void cpu_acquire_kds_cb_fn(void *param1, void *param2)
+{
+       struct completion* completion = (struct completion *)param1;
+       complete(completion);
+}
+
+int exynos_drm_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file)
+{
+       struct drm_exynos_gem_cpu_acquire *args = data;
+       struct exynos_drm_file_private *file_priv = file->driver_priv;
+       struct drm_gem_object *obj;
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       struct kds_resource *kds;
+       struct kds_resource_set *rset;
+       unsigned long exclusive;
+       struct kds_callback callback;
+       DECLARE_COMPLETION_ONSTACK(completion);
+#endif
+       struct exynos_drm_gem_obj_node *gem_node;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (!(dev->driver->driver_features & DRIVER_GEM)) {
+               DRM_ERROR("does not support GEM.\n");
+               ret = -ENODEV;
+               goto unlock;
+       }
+
+       obj = drm_gem_object_lookup(dev, file, args->handle);
+       if (!obj) {
+               DRM_ERROR("failed to lookup gem object.\n");
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       exynos_gem_obj = to_exynos_gem_obj(obj);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       if (exynos_gem_obj->base.export_dma_buf == NULL) {
+               /* If there is no dmabuf present, there is no cross-process/
+                * cross-device sharing and sync is unnecessary.
+                */
+               ret = 0;
+               goto unref_obj;
+       }
+
+       exclusive = 0;
+       if ((args->flags & DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE) != 0)
+               exclusive = 1;
+       kds = &exynos_gem_obj->base.export_dma_buf->kds;
+       kds_callback_init(&callback, 1, &cpu_acquire_kds_cb_fn);
+       ret = kds_async_waitall(&rset, KDS_FLAG_LOCKED_WAIT, &callback,
+               &completion, NULL, 1, &exclusive, &kds);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!IS_ERR_VALUE(ret))
+               ret = wait_for_completion_interruptible(&completion);
+       kds_callback_term(&callback);
+
+       mutex_lock(&dev->struct_mutex);
+       if (IS_ERR_VALUE(ret))
+               goto release_rset;
+#endif
+
+       gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+       if (!gem_node) {
+               DRM_ERROR("failed to allocate eyxnos_drm_gem_obj_node.\n");
+               ret = -ENOMEM;
+               goto release_rset;
+       }
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       exynos_gem_obj->resource_set = rset;
+#endif
+
+       gem_node->exynos_gem_obj = exynos_gem_obj;
+       list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+
+
+release_rset:
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       kds_resource_set_release_sync(&rset);
+#endif
+
+unref_obj:
+       drm_gem_object_unreference(obj);
+
+unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+int exynos_drm_gem_cpu_release_ioctl(struct drm_device *dev, void* data,
+                               struct drm_file *file)
+{
+       struct drm_exynos_gem_cpu_acquire *args = data;
+       struct exynos_drm_file_private *file_priv = file->driver_priv;
+       struct drm_gem_object *obj;
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+       struct list_head *cur;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (!(dev->driver->driver_features & DRIVER_GEM)) {
+               DRM_ERROR("does not support GEM.\n");
+               ret = -ENODEV;
+               goto unlock;
+       }
+
+       obj = drm_gem_object_lookup(dev, file, args->handle);
+       if (!obj) {
+               DRM_ERROR("failed to lookup gem object.\n");
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       exynos_gem_obj = to_exynos_gem_obj(obj);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       if (exynos_gem_obj->base.export_dma_buf == NULL) {
+               /* If there is no dmabuf present, there is no cross-process/
+                * cross-device sharing and sync is unnecessary.
+                */
+               ret = 0;
+               goto unref_obj;
+       }
+#endif
+
+       list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
+               struct exynos_drm_gem_obj_node *node = list_entry(
+                               cur, struct exynos_drm_gem_obj_node, list);
+               if (node->exynos_gem_obj == exynos_gem_obj)
+                       break;
+       }
+       if (cur == &file_priv->gem_cpu_acquire_list) {
+               DRM_ERROR("gem object not acquired for current process.\n");
+               ret = -EINVAL;
+               goto unref_obj;
+       }
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       /* kds_resource_set_release NULLs the pointer */
+       BUG_ON(exynos_gem_obj->resource_set == NULL);
+       kds_resource_set_release(&exynos_gem_obj->resource_set);
+#endif
+
+       list_del(cur);
+       kfree(list_entry(cur, struct exynos_drm_gem_obj_node, list));
+       /* unreference for the reference held since cpu_acquire_ioctl */
+       drm_gem_object_unreference(obj);
+       ret = 0;
+
+unref_obj:
+       /* unreference for the reference from drm_gem_object_lookup() */
+       drm_gem_object_unreference(obj);
+
+unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
index ea69d125e7b2065b7312705406890b9daab5a145..25447ac759392cfec2dffbd331cc1c51634d4200 100644 (file)
@@ -63,6 +63,8 @@ struct exynos_drm_gem_buf {
  *     by user request or at framebuffer creation.
  *     continuous memory region allocated by user request
  *     or at framebuffer creation.
+ * @resource_set: the KDS resource set held by the currently outstanding CPU
+ *     acquire (if any).
  * @flags: indicate memory type to allocated buffer and cache attribute.
  *
  * P.S. this object would be transfered to user as kms_bo.handle so
@@ -71,9 +73,23 @@ struct exynos_drm_gem_buf {
 struct exynos_drm_gem_obj {
        struct drm_gem_object           base;
        struct exynos_drm_gem_buf       *buffer;
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       struct kds_resource_set         *resource_set;
+#endif
        unsigned int                    flags;
 };
 
+/*
+ * exynos drm buffer linked list structure.
+ *
+ * @list: list link.
+ * @exynos_gem_obj: struct exynos_drm_gem_obj that this entry points to.
+ */
+struct exynos_drm_gem_obj_node {
+       struct list_head                list;
+       struct exynos_drm_gem_obj       *exynos_gem_obj;
+};
+
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 
 /* destroy a buffer with gem object */
@@ -125,6 +141,17 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 
+/*
+ * acquire gem object for CPU access.
+ */
+int exynos_drm_gem_cpu_acquire_ioctl(struct drm_device *dev, void* data,
+                              struct drm_file *file_priv);
+/*
+ * release gem object after CPU access.
+ */
+int exynos_drm_gem_cpu_release_ioctl(struct drm_device *dev, void* data,
+                              struct drm_file *file_priv);
+
 /* initialize gem object. */
 int exynos_drm_gem_init_object(struct drm_gem_object *obj);
 
index 7d15c84d501220a2ec2eadfa0d95522386ae078b..dc01f66badae41594f495a2c981d53b9fdeaefc3 100644 (file)
@@ -74,6 +74,11 @@ struct drm_exynos_gem_mmap {
        uint64_t mapped;
 };
 
+struct drm_exynos_plane_set_zpos {
+       __u32 plane_id;
+       __s32 zpos;
+};
+
 /**
  * A structure for user connection request of virtual display.
  *
@@ -88,13 +93,33 @@ struct drm_exynos_vidi_connection {
        uint64_t edid;
 };
 
-struct drm_exynos_plane_set_zpos {
-       __u32 plane_id;
-       __s32 zpos;
+/* acquire type definitions. */
+enum drm_exynos_gem_cpu_acquire_type {
+       DRM_EXYNOS_GEM_CPU_ACQUIRE_SHARED = 0x0,
+       DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE = 0x1,
+};
+
+/**
+ * A structure for acquiring buffer for CPU access.
+ *
+ * @handle: a handle to gem object created.
+ * @flags: acquire type (DRM_EXYNOS_GEM_CPU_ACQUIRE_SHARED or
+ *     DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE).
+ */
+struct drm_exynos_gem_cpu_acquire {
+       unsigned int handle;
+       unsigned int flags;
+};
+
+/**
+ * A structure for releasing buffer after CPU access.
+ *
+ * @handle: a handle to gem object created.
+ */
+struct drm_exynos_gem_cpu_release {
+       unsigned int handle;
 };
 
 /* memory type definitions. */
-enum e_drm_exynos_gem_mem_type {
+enum drm_exynos_gem_mem_type {
        /* Physically Non-Continuous memory. */
        EXYNOS_BO_NONCONTIG     = 1 << 0,
        EXYNOS_BO_MASK          = EXYNOS_BO_NONCONTIG
@@ -106,6 +131,8 @@ enum e_drm_exynos_gem_mem_type {
 /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
 #define DRM_EXYNOS_PLANE_SET_ZPOS      0x06
 #define DRM_EXYNOS_VIDI_CONNECTION     0x07
+#define DRM_EXYNOS_GEM_CPU_ACQUIRE     0x08
+#define DRM_EXYNOS_GEM_CPU_RELEASE     0x09
 
 #define DRM_IOCTL_EXYNOS_GEM_CREATE            DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -122,6 +149,12 @@ enum e_drm_exynos_gem_mem_type {
 #define DRM_IOCTL_EXYNOS_VIDI_CONNECTION       DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
 
+#define DRM_IOCTL_EXYNOS_GEM_CPU_ACQUIRE       DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_GEM_CPU_ACQUIRE, struct drm_exynos_gem_cpu_acquire)
+
+#define DRM_IOCTL_EXYNOS_GEM_CPU_RELEASE       DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_GEM_CPU_RELEASE, struct drm_exynos_gem_cpu_release)
+
 #ifdef __KERNEL__
 
 /**